Merge tag 'v4.5-rc5' into efi/core, before queueing up new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 65b3eac..ff49cf9 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -7,7 +7,7 @@
 conventions of cgroup v2.  It describes all userland-visible aspects
 of cgroup including core and specific controller behaviors.  All
 future changes must be reflected in this document.  Documentation for
-v1 is available under Documentation/cgroup-legacy/.
+v1 is available under Documentation/cgroup-v1/.
 
 CONTENTS
 
@@ -843,6 +843,10 @@
 		Amount of memory used to cache filesystem data,
 		including tmpfs and shared memory.
 
+	  sock
+
+		Amount of memory used in network transmission buffers
+
 	  file_mapped
 
 		Amount of cached filesystem data mapped with mmap()
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
index ace0599..20df350 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
@@ -30,7 +30,7 @@
 clock-output-names:
  - "xin24m" - crystal input - required,
  - "ext_i2s" - external I2S clock - optional,
- - "ext_gmac" - external GMAC clock - optional
+ - "rmii_clkin" - external EMAC clock - optional
 
 Example: Clock controller node:
 
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 7803e77..007a5b4 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -24,9 +24,8 @@
 		1 = edge triggered
 		4 = level triggered
 
-  Cells 4 and beyond are reserved for future use. When the 1st cell
-  has a value of 0 or 1, cells 4 and beyond act as padding, and may be
-  ignored. It is recommended that padding cells have a value of 0.
+  Cells 4 and beyond are reserved for future use and must have a value
+  of 0 if present.
 
 - reg : Specifies base physical address(s) and size of the GIC
   registers, in the following order:
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index 4e8b90e..07a7509 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -8,6 +8,7 @@
 Required properties:
 - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
 	      "renesas,pci-r8a7791" for the R8A7791 SoC;
+	      "renesas,pci-r8a7793" for the R8A7793 SoC;
 	      "renesas,pci-r8a7794" for the R8A7794 SoC;
 	      "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device
 
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 558fe52..6cf9969 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -4,6 +4,7 @@
 compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
 	    "renesas,pcie-r8a7790" for the R8A7790 SoC;
 	    "renesas,pcie-r8a7791" for the R8A7791 SoC;
+	    "renesas,pcie-r8a7793" for the R8A7793 SoC;
 	    "renesas,pcie-r8a7795" for the R8A7795 SoC;
 	    "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.
 
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index ac2fcd6..1068ffc 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -14,6 +14,10 @@
   interrupt number is the rtc alarm interrupt and second interrupt number
   is the rtc tick interrupt. The number of cells representing a interrupt
   depends on the parent interrupt controller.
+- clocks: Must contain a list of phandle and clock specifier pairs for the
+          rtc and source clocks.
+- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
+               same order as the clocks property.
 
 Example:
 
@@ -21,4 +25,6 @@
 		compatible = "samsung,s3c6410-rtc";
 		reg = <0x10070000 0x100>;
 		interrupts = <44 0 45 0>;
+		clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
+		clock-names = "rtc", "rtc_src";
 	};
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 35ae1fb..ed94c21 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,7 +9,7 @@
 - fsl,uart-has-rtscts : Indicate the uart has rts and cts
 - fsl,irda-mode : Indicate the uart supports irda mode
 - fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
-                  is DCE mode by default.
+                  in DCE mode by default.
 
 Note: Each uart controller should have an alias correctly numbered
 in "aliases" node.
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
index ce55c0a..4da41bf 100644
--- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
@@ -30,6 +30,8 @@
  "fsl,imx-audio-sgtl5000"
  (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
 
+ "fsl,imx-audio-wm8960"
+
 Required properties:
 
   - compatible		: Contains one of entries in the compatible list.
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index 332e625..e5ee3f1 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -1,8 +1,9 @@
 * Renesas R-Car Thermal
 
 Required properties:
-- compatible		: "renesas,thermal-<soctype>", "renesas,rcar-thermal"
-			  as fallback.
+- compatible		: "renesas,thermal-<soctype>",
+			   "renesas,rcar-gen2-thermal" (with thermal-zone) or
+			   "renesas,rcar-thermal" (without thermal-zone) as fallback.
 			  Examples with soctypes are:
 			    - "renesas,thermal-r8a73a4" (R-Mobile APE6)
 			    - "renesas,thermal-r8a7779" (R-Car H1)
@@ -36,3 +37,35 @@
 		0xe61f0300 0x38>;
 	interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
 };
+
+Example (with thermal-zone):
+
+thermal-zones {
+	cpu_thermal: cpu-thermal {
+		polling-delay-passive	= <1000>;
+		polling-delay		= <5000>;
+
+		thermal-sensors = <&thermal>;
+
+		trips {
+			cpu-crit {
+				temperature	= <115000>;
+				hysteresis	= <0>;
+				type		= "critical";
+			};
+		};
+		cooling-maps {
+		};
+	};
+};
+
+thermal: thermal@e61f0000 {
+	compatible =	"renesas,thermal-r8a7790",
+			"renesas,rcar-gen2-thermal",
+			"renesas,rcar-thermal";
+	reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
+	interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
+	power-domains = <&cpg_clocks>;
+	#thermal-sensor-cells = <0>;
+};
diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
index c477af0..686a64b 100644
--- a/Documentation/filesystems/efivarfs.txt
+++ b/Documentation/filesystems/efivarfs.txt
@@ -14,3 +14,10 @@
 efivarfs is typically mounted like this,
 
 	mount -t efivarfs none /sys/firmware/efi/efivars
+
+Due to the presence of numerous firmware bugs where removing non-standard
+UEFI variables causes the system firmware to fail to POST, efivarfs
+files that are not well-known standardized variables are created
+as immutable files.  This doesn't prevent removal - "chattr -i" will work -
+but it does prevent this kind of failure from happening
+accidentally.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index fde9fd0..843b045b 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -240,8 +240,8 @@
  RssFile                     size of resident file mappings
  RssShmem                    size of resident shmem memory (includes SysV shm,
                              mapping of tmpfs and shared anonymous mappings)
- VmData                      size of data, stack, and text segments
- VmStk                       size of data, stack, and text segments
+ VmData                      size of private data segments
+ VmStk                       size of stack segments
  VmExe                       size of text segment
  VmLib                       size of shared library code
  VmPTE                       size of page table entries
@@ -356,7 +356,7 @@
 a7cb1000-a7cb2000 ---p 00000000 00:00 0
 a7cb2000-a7eb2000 rw-p 00000000 00:00 0
 a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0          [stack:1001]
+a7eb3000-a7ed5000 rw-p 00000000 00:00 0
 a7ed5000-a8008000 r-xp 00000000 03:00 4222       /lib/libc.so.6
 a8008000-a800a000 r--p 00133000 03:00 4222       /lib/libc.so.6
 a800a000-a800b000 rw-p 00135000 03:00 4222       /lib/libc.so.6
@@ -388,7 +388,6 @@
 
  [heap]                   = the heap of the program
  [stack]                  = the stack of the main process
- [stack:1001]             = the stack of the thread with tid 1001
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
 
@@ -396,10 +395,8 @@
 
 The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
 of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. This is a key difference from the
-content of /proc/PID/maps, where you will see all mappings that are being used
-as stack by all of those tasks. Hence, for the example above, the task-level
-map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
+as [stack] if that task sees it as a stack. Hence, for the example above, the
+task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
 
 08048000-08049000 r-xp 00000000 03:00 8312       /opt/test
 08049000-0804a000 rw-p 00001000 03:00 8312       /opt/test
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 87d40a7..9a53c92 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1496,6 +1496,11 @@
 			could change it dynamically, usually by
 			/sys/module/printk/parameters/ignore_loglevel.
 
+	ignore_rlimit_data
+			Ignore RLIMIT_DATA setting for data mappings,
+			print warning at first misuse.  Can be changed via
+			/sys/module/kernel/parameters/ignore_rlimit_data.
+
 	ihash_entries=	[KNL]
 			Set number of hash buckets for inode cache.
 
@@ -4230,6 +4235,17 @@
 			The default value of this parameter is determined by
 			the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
 
+	workqueue.debug_force_rr_cpu
+			Workqueues used to implicitly guarantee that work
+			items queued without an explicit CPU specified were
+			put on the local CPU.  This guarantee is no longer
+			true and, while the local CPU is still preferred,
+			work items may be put on foreign CPUs.  This debug
+			option forces round-robin CPU selection to flush
+			out usages which depend on the now broken guarantee.
+			When enabled, memory and cache locality will be
+			impacted.
+
 	x2apic_phys	[X86-64,APIC] Use x2apic physical mode instead of
 			default x2apic cluster mode on platforms
 			supporting x2apic.
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt
index 767392f..a484d2c 100644
--- a/Documentation/timers/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -1,9 +1,7 @@
 		High Precision Event Timer Driver for Linux
 
 The High Precision Event Timer (HPET) hardware follows a specification
-by Intel and Microsoft which can be found at
-
-	http://www.intel.com/hardwaredesign/hpetspec_1.pdf
+by Intel and Microsoft, revision 1.
 
 Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision")
 and up to 32 comparators.  Normally three or more comparators are provided,
diff --git a/MAINTAINERS b/MAINTAINERS
index f678c37..4978dc1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -223,9 +223,7 @@
 
 ABI/API
 L:	linux-api@vger.kernel.org
-F:	Documentation/ABI/
 F:	include/linux/syscalls.h
-F:	include/uapi/
 F:	kernel/sys_ni.c
 
 ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
@@ -686,13 +684,6 @@
 S:	Supported
 F:	drivers/macintosh/ams/
 
-AMSO1100 RNIC DRIVER
-M:	Tom Tucker <tom@opengridcomputing.com>
-M:	Steve Wise <swise@opengridcomputing.com>
-L:	linux-rdma@vger.kernel.org
-S:	Maintained
-F:	drivers/infiniband/hw/amso1100/
-
 ANALOG DEVICES INC AD9389B DRIVER
 M:	Hans Verkuil <hans.verkuil@cisco.com>
 L:	linux-media@vger.kernel.org
@@ -967,6 +958,8 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-highbank/
+F:	arch/arm/boot/dts/highbank.dts
+F:	arch/arm/boot/dts/ecx-*.dts*
 
 ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
 M:	Krzysztof Halasa <khalasa@piap.pl>
@@ -1042,6 +1035,7 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
 S:	Maintained
+F:	arch/arm/boot/dts/prima2*
 F:	arch/arm/mach-prima2/
 F:	drivers/clk/sirf/
 F:	drivers/clocksource/timer-prima2.c
@@ -1143,6 +1137,10 @@
 S:	Supported
 T:	git git://github.com/hisilicon/linux-hisi.git
 F:	arch/arm/mach-hisi/
+F:	arch/arm/boot/dts/hi3*
+F:	arch/arm/boot/dts/hip*
+F:	arch/arm/boot/dts/hisi*
+F:	arch/arm64/boot/dts/hisilicon/
 
 ARM/HP JORNADA 7XX MACHINE SUPPORT
 M:	Kristoffer Ericson <kristoffer.ericson@gmail.com>
@@ -1219,6 +1217,7 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-keystone/
+F:	arch/arm/boot/dts/k2*
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
@@ -1287,6 +1286,7 @@
 S:	Maintained
 F:	arch/arm/mach-berlin/
 F:	arch/arm/boot/dts/berlin*
+F:	arch/arm64/boot/dts/marvell/berlin*
 
 
 ARM/Marvell Dove/MV78xx0/Orion SOC support
@@ -1425,6 +1425,7 @@
 F:	arch/arm/boot/dts/qcom-*.dts
 F:	arch/arm/boot/dts/qcom-*.dtsi
 F:	arch/arm/mach-qcom/
+F:	arch/arm64/boot/dts/qcom/*
 F:	drivers/soc/qcom/
 F:	drivers/tty/serial/msm_serial.h
 F:	drivers/tty/serial/msm_serial.c
@@ -1441,8 +1442,8 @@
 ARM/RENESAS ARM64 ARCHITECTURE
 M:	Simon Horman <horms@verge.net.au>
 M:	Magnus Damm <magnus.damm@gmail.com>
-L:	linux-sh@vger.kernel.org
-Q:	http://patchwork.kernel.org/project/linux-sh/list/
+L:	linux-renesas-soc@vger.kernel.org
+Q:	http://patchwork.kernel.org/project/linux-renesas-soc/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 S:	Supported
 F:	arch/arm64/boot/dts/renesas/
@@ -1484,6 +1485,8 @@
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/boot/dts/s3c*
+F:	arch/arm/boot/dts/s5p*
+F:	arch/arm/boot/dts/samsung*
 F:	arch/arm/boot/dts/exynos*
 F:	arch/arm64/boot/dts/exynos/
 F:	arch/arm/plat-samsung/
@@ -1563,6 +1566,7 @@
 F:	arch/arm/mach-socfpga/
 F:	arch/arm/boot/dts/socfpga*
 F:	arch/arm/configs/socfpga_defconfig
+F:	arch/arm64/boot/dts/altera/
 W:	http://www.rocketboards.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
@@ -1716,7 +1720,7 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/boot/dts/vexpress*
-F:	arch/arm64/boot/dts/arm/vexpress*
+F:	arch/arm64/boot/dts/arm/
 F:	arch/arm/mach-vexpress/
 F:	*/*/vexpress*
 F:	*/*/*/vexpress*
@@ -2343,6 +2347,7 @@
 F:	arch/arm/boot/dts/bcm113*
 F:	arch/arm/boot/dts/bcm216*
 F:	arch/arm/boot/dts/bcm281*
+F:	arch/arm64/boot/dts/broadcom/
 F:	arch/arm/configs/bcm_defconfig
 F:	drivers/mmc/host/sdhci-bcm-kona.c
 F:	drivers/clocksource/bcm_kona_timer.c
@@ -2357,14 +2362,6 @@
 S:	Maintained
 N:	bcm2835
 
-BROADCOM BCM33XX MIPS ARCHITECTURE
-M:	Kevin Cernekee <cernekee@gmail.com>
-L:	linux-mips@linux-mips.org
-S:	Maintained
-F:	arch/mips/bcm3384/*
-F:	arch/mips/include/asm/mach-bcm3384/*
-F:	arch/mips/kernel/*bmips*
-
 BROADCOM BCM47XX MIPS ARCHITECTURE
 M:	Hauke Mehrtens <hauke@hauke-m.de>
 M:	Rafał Miłecki <zajec5@gmail.com>
@@ -3445,7 +3442,7 @@
 F:	drivers/usb/dwc2/
 
 DESIGNWARE USB3 DRD IP DRIVER
-M:	Felipe Balbi <balbi@ti.com>
+M:	Felipe Balbi <balbi@kernel.org>
 L:	linux-usb@vger.kernel.org
 L:	linux-omap@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -4184,13 +4181,6 @@
 S:	Orphan
 F:	fs/efs/
 
-EHCA (IBM GX bus InfiniBand adapter) DRIVER
-M:	Hoang-Nam Nguyen <hnguyen@de.ibm.com>
-M:	Christoph Raisch <raisch@de.ibm.com>
-L:	linux-rdma@vger.kernel.org
-S:	Supported
-F:	drivers/infiniband/hw/ehca/
-
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
 M:	Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:	netdev@vger.kernel.org
@@ -5809,12 +5799,6 @@
 S:	Maintained
 F:	net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IPATH DRIVER
-M:	Mike Marciniszyn <infinipath@intel.com>
-L:	linux-rdma@vger.kernel.org
-S:	Maintained
-F:	drivers/staging/rdma/ipath/
-
 IPMI SUBSYSTEM
 M:	Corey Minyard <minyard@acm.org>
 L:	openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -6144,7 +6128,7 @@
 
 KERNEL SELFTEST FRAMEWORK
 M:	Shuah Khan <shuahkh@osg.samsung.com>
-L:	linux-api@vger.kernel.org
+L:	linux-kselftest@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/shuah/linux-kselftest
 S:	Maintained
 F:	tools/testing/selftests
@@ -7370,7 +7354,7 @@
 F:	include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M:	Felipe Balbi <balbi@ti.com>
+M:	Felipe Balbi <balbi@kernel.org>
 L:	linux-usb@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:	Maintained
@@ -7939,7 +7923,7 @@
 F:	drivers/staging/media/omap4iss/
 
 OMAP USB SUPPORT
-M:	Felipe Balbi <balbi@ti.com>
+M:	Felipe Balbi <balbi@kernel.org>
 L:	linux-usb@vger.kernel.org
 L:	linux-omap@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -8818,6 +8802,7 @@
 T:	git git://github.com/hzhuang1/linux.git
 T:	git git://github.com/rjarzmik/linux.git
 S:	Maintained
+F:	arch/arm/boot/dts/pxa*
 F:	arch/arm/mach-pxa/
 F:	drivers/dma/pxa*
 F:	drivers/pcmcia/pxa2xx*
@@ -8847,6 +8832,7 @@
 T:	git git://github.com/hzhuang1/linux.git
 T:	git git://git.linaro.org/people/ycmiao/pxa-linux.git
 S:	Maintained
+F:	arch/arm/boot/dts/mmp*
 F:	arch/arm/mach-mmp/
 
 PXA MMCI DRIVER
@@ -9793,10 +9779,11 @@
 F:	drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M:	Sathya Perla <sathya.perla@avagotech.com>
-M:	Ajit Khaparde <ajit.khaparde@avagotech.com>
-M:	Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M:	Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M:	Sathya Perla <sathya.perla@broadcom.com>
+M:	Ajit Khaparde <ajit.khaparde@broadcom.com>
+M:	Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M:	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M:	Somnath Kotur <somnath.kotur@broadcom.com>
 L:	netdev@vger.kernel.org
 W:	http://www.emulex.com
 S:	Supported
@@ -10158,6 +10145,7 @@
 F:	drivers/media/pci/solo6x10/
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
+M:	Shaohua Li <shli@kernel.org>
 L:	linux-raid@vger.kernel.org
 T:	git git://neil.brown.name/md
 S:	Supported
@@ -10291,6 +10279,7 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
 S:	Maintained
+F:	arch/arm/boot/dts/spear*
 F:	arch/arm/mach-spear/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
@@ -11318,7 +11307,7 @@
 F:	drivers/usb/host/ehci*
 
 USB GADGET/PERIPHERAL SUBSYSTEM
-M:	Felipe Balbi <balbi@ti.com>
+M:	Felipe Balbi <balbi@kernel.org>
 L:	linux-usb@vger.kernel.org
 W:	http://www.linux-usb.org/gadget
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -11394,7 +11383,7 @@
 F:	drivers/net/usb/pegasus.*
 
 USB PHY LAYER
-M:	Felipe Balbi <balbi@ti.com>
+M:	Felipe Balbi <balbi@kernel.org>
 L:	linux-usb@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:	Maintained
@@ -12133,7 +12122,7 @@
 F:	drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
-M:	Seth Jennings <sjennings@variantweb.net>
+M:	Seth Jennings <sjenning@redhat.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zbud.c
@@ -12188,7 +12177,7 @@
 F:	Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
-M:	Seth Jennings <sjennings@variantweb.net>
+M:	Seth Jennings <sjenning@redhat.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zswap.c
diff --git a/Makefile b/Makefile
index 6c1a3c2..fbe1b92 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 76dde9d..0655495 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -338,6 +338,19 @@
 
 endchoice
 
+choice
+	prompt "MMU Super Page Size"
+	depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE
+	default ARC_HUGEPAGE_2M
+
+config ARC_HUGEPAGE_2M
+	bool "2MB"
+
+config ARC_HUGEPAGE_16M
+	bool "16MB"
+
+endchoice
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -410,7 +423,7 @@
 	default n
 	depends on !SMP
 
-config ARC_HAS_GRTC
+config ARC_HAS_GFRC
 	bool "SMP synchronized 64-bit cycle counter"
 	default y
 	depends on SMP
@@ -566,6 +579,12 @@
 endmenu	 # "ARC Architecture Configuration"
 
 source "mm/Kconfig"
+
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	default "12" if ARC_HUGEPAGE_16M
+	default "11"
+
 source "net/Kconfig"
 source "drivers/Kconfig"
 source "fs/Kconfig"
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index f36c047..7359859 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -16,7 +16,7 @@
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
-# CONFIG_ARC_HAS_GRTC is not set
+# CONFIG_ARC_HAS_GFRC is not set
 CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7fac7d8..fdc5be5 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -349,14 +349,13 @@
 	struct cpuinfo_arc_bpu bpu;
 	struct bcr_identity core;
 	struct bcr_isa isa;
-	struct bcr_timer timers;
 	unsigned int vec_base;
 	struct cpuinfo_arc_ccm iccm, dccm;
 	struct {
 		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
 			     fpu_sp:1, fpu_dp:1, pad2:6,
 			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
-			     pad4:8;
+			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 	} extn;
 	struct bcr_mpy extn_mpy;
 	struct bcr_extn_xymem extn_xymem;
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 258b0e5..1fc18ee 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -30,8 +30,11 @@
 /* Was Intr taken in User Mode */
 #define AUX_IRQ_ACT_BIT_U	31
 
-/* 0 is highest level, but taken by FIRQs, if present in design */
-#define ARCV2_IRQ_DEF_PRIO		0
+/*
+ * User space should be interruptible even by the lowest prio interrupt
+ * Safe even if the actual number of interrupt priorities is fewer, or even one
+ */
+#define ARCV2_IRQ_DEF_PRIO	15
 
 /* seed value for status register */
 #define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 46f4e53..847e3bb 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -39,8 +39,8 @@
 #define CMD_DEBUG_SET_MASK		0x34
 #define CMD_DEBUG_SET_SELECT		0x36
 
-#define CMD_GRTC_READ_LO		0x42
-#define CMD_GRTC_READ_HI		0x43
+#define CMD_GFRC_READ_LO		0x42
+#define CMD_GFRC_READ_HI		0x43
 
 #define CMD_IDU_ENABLE			0x71
 #define CMD_IDU_DISABLE			0x72
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 57af2f0..d426d42 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -179,37 +179,44 @@
 #define __S111  PAGE_U_X_W_R
 
 /****************************************************************
- * Page Table Lookup split
+ * 2 tier (PGD:PTE) software page walker
  *
- * We implement 2 tier paging and since this is all software, we are free
- * to customize the span of a PGD / PTE entry to suit us
- *
- *			32 bit virtual address
+ * [31]		    32 bit virtual address              [0]
  * -------------------------------------------------------
- * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
+ * |               | <------------ PGDIR_SHIFT ----------> |
+ * |		   |					 |
+ * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT --> |
  * -------------------------------------------------------
  *       |                  |                |
  *       |                  |                --> off in page frame
- *       |		    |
  *       |                  ---> index into Page Table
- *       |
  *       ----> index into Page Directory
+ *
+ * In a single page size configuration, only PAGE_SHIFT is fixed
+ * So both PGD and PTE sizing can be tweaked
+ *  e.g. 8K page (PAGE_SHIFT 13) can have
+ *  - PGDIR_SHIFT 21  -> 11:8:13 address split
+ *  - PGDIR_SHIFT 24  -> 8:11:13 address split
+ *
+ * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
+ * so the sizing flexibility is gone.
  */
 
-#define BITS_IN_PAGE	PAGE_SHIFT
-
-/* Optimal Sizing of Pg Tbl - based on MMU page size */
-#if defined(CONFIG_ARC_PAGE_SIZE_8K)
-#define BITS_FOR_PTE	8		/* 11:8:13 */
-#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define BITS_FOR_PTE	8		/* 10:8:14 */
-#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define BITS_FOR_PTE	9		/* 11:9:12 */
+#if defined(CONFIG_ARC_HUGEPAGE_16M)
+#define PGDIR_SHIFT	24
+#elif defined(CONFIG_ARC_HUGEPAGE_2M)
+#define PGDIR_SHIFT	21
+#else
+/*
+ * Only Normal page support so "hackable" (see comment above)
+ * Default value provides 11:8:13 (8K), 11:9:12 (4K)
+ */
+#define PGDIR_SHIFT	21
 #endif
 
-#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)
+#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
+#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)
 
-#define PGDIR_SHIFT	(32 - BITS_FOR_PGD)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PDG sz */
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cbfec79..b178302 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -211,7 +211,11 @@
 ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
 ; entry was via Exception in DS which got preempted in kernel).
 ;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; Solution is to return from the Intr, w/o any delay slot quirks, into a kernel trampoline
+; and from pure kernel mode return to the delay slot, which handles DS bit/BTA correctly
+
 .Lintr_ret_to_delay_slot:
 debug_marker_ds:
 
@@ -222,18 +226,23 @@
 	ld	r2, [sp, PT_ret]
 	ld	r3, [sp, PT_status32]
 
+	; STAT32 for Int return created from scratch
+	; (No delay slot, disable further intr in trampoline)
+
 	bic  	r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
 	st	r0, [sp, PT_status32]
 
 	mov	r1, .Lintr_ret_to_delay_slot_2
 	st	r1, [sp, PT_ret]
 
+	; Orig exception PC/STAT32 kept safe in @orig_r0 and @event stack slots
 	st	r2, [sp, 0]
 	st	r3, [sp, 4]
 
 	b	.Lisr_ret_fast_path
 
 .Lintr_ret_to_delay_slot_2:
+	; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
 	sub	sp, sp, SZ_PT_REGS
 	st	r9, [sp, -4]
 
@@ -243,11 +252,19 @@
 	ld	r9, [sp, 4]
 	sr	r9, [erstatus]
 
+	; restore AUX_USER_SP if returning to U mode
+	bbit0	r9, STATUS_U_BIT, 1f
+	ld	r9, [sp, PT_sp]
+	sr	r9, [AUX_USER_SP]
+
+1:
 	ld	r9, [sp, 8]
 	sr	r9, [erbta]
 
 	ld	r9, [sp, -4]
 	add	sp, sp, SZ_PT_REGS
+
+	; return from pure kernel mode to delay slot
 	rtie
 
 END(ret_from_exception)
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 0394f9f..9425263 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -14,6 +14,8 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+static int irq_prio;
+
 /*
  * Early Hardware specific Interrupt setup
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,6 +26,14 @@
 {
 	unsigned int tmp;
 
+	struct irq_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+		unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+	} irq_bcr;
+
 	struct aux_irq_ctrl {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 		unsigned int res3:18, save_idx_regs:1, res2:1,
@@ -46,28 +56,25 @@
 
 	WRITE_AUX(AUX_IRQ_CTRL, ictrl);
 
-	/* setup status32, don't enable intr yet as kernel doesn't want */
-	tmp = read_aux_reg(0xa);
-	tmp |= ISA_INIT_STATUS_BITS;
-	tmp &= ~STATUS_IE_MASK;
-	asm volatile("flag %0	\n"::"r"(tmp));
-
 	/*
 	 * ARCv2 core intc provides multiple interrupt priorities (upto 16).
 	 * Typical builds though have only two levels (0-high, 1-low)
 	 * Linux by default uses lower prio 1 for most irqs, reserving 0 for
 	 * NMI style interrupts in future (say perf)
-	 *
-	 * Read the intc BCR to confirm that Linux default priority is avail
-	 * in h/w
-	 *
-	 * Note:
-	 *  IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
-	 *  is 0 based.
 	 */
-	tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
-	if (ARCV2_IRQ_DEF_PRIO > tmp)
-		panic("Linux default irq prio incorrect\n");
+
+	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+
+	irq_prio = irq_bcr.prio;	/* Encoded as N-1 for N levels */
+	pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
+		irq_prio + 1, irq_prio,
+		irq_bcr.firq ? " FIRQ (not used)":"");
+
+	/* setup status32, don't enable intr yet as kernel doesn't want */
+	tmp = read_aux_reg(0xa);
+	tmp |= STATUS_AD_MASK | (irq_prio << 1);
+	tmp &= ~STATUS_IE_MASK;
+	asm volatile("flag %0	\n"::"r"(tmp));
 }
 
 static void arcv2_irq_mask(struct irq_data *data)
@@ -86,7 +93,7 @@
 {
 	/* set default priority */
 	write_aux_reg(AUX_IRQ_SELECT, data->irq);
-	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+	write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
 
 	/*
 	 * hw auto enables (linux unmask) all by default
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index bd237ac..bc771f5 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -96,13 +96,13 @@
 #ifdef CONFIG_CPU_BIG_ENDIAN
 		unsigned int pad3:8,
 			     idu:1, llm:1, num_cores:6,
-			     iocoh:1,  grtc:1, dbg:1, pad2:1,
+			     iocoh:1,  gfrc:1, dbg:1, pad2:1,
 			     msg:1, sem:1, ipi:1, pad:1,
 			     ver:8;
 #else
 		unsigned int ver:8,
 			     pad:1, ipi:1, sem:1, msg:1,
-			     pad2:1, dbg:1, grtc:1, iocoh:1,
+			     pad2:1, dbg:1, gfrc:1, iocoh:1,
 			     num_cores:6, llm:1, idu:1,
 			     pad3:8;
 #endif
@@ -116,7 +116,7 @@
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
-		IS_AVAIL1(mp.grtc, "GRTC"));
+		IS_AVAIL1(mp.gfrc, "GFRC"));
 
 	idu_detected = mp.idu;
 
@@ -125,8 +125,8 @@
 		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
 	}
 
-	if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
-		panic("kernel trying to use non-existent GRTC\n");
+	if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
+		panic("kernel trying to use non-existent GFRC\n");
 }
 
 struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index e1b8744..a7edceb 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -45,6 +45,7 @@
 static void read_arc_build_cfg_regs(void)
 {
 	struct bcr_perip uncached_space;
+	struct bcr_timer timer;
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	unsigned long perip_space;
@@ -53,7 +54,11 @@
 	READ_BCR(AUX_IDENTITY, cpu->core);
 	READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
-	READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
+	READ_BCR(ARC_REG_TIMERS_BCR, timer);
+	cpu->extn.timer0 = timer.t0;
+	cpu->extn.timer1 = timer.t1;
+	cpu->extn.rtc = timer.rtc;
+
 	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
 	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
@@ -208,9 +213,9 @@
 		       (unsigned int)(arc_get_core_freq() / 10000) % 100);
 
 	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
-		       IS_AVAIL1(cpu->timers.t0, "Timer0 "),
-		       IS_AVAIL1(cpu->timers.t1, "Timer1 "),
-		       IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
+		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
+		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
+		       IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
 				 CONFIG_ARC_HAS_RTC));
 
 	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
@@ -293,13 +298,13 @@
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	int fpu_enabled;
 
-	if (!cpu->timers.t0)
+	if (!cpu->extn.timer0)
 		panic("Timer0 is not present!\n");
 
-	if (!cpu->timers.t1)
+	if (!cpu->extn.timer1)
 		panic("Timer1 is not present!\n");
 
-	if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
+	if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
 		panic("RTC is not present\n");
 
 #ifdef CONFIG_ARC_HAS_DCCM
@@ -334,6 +339,7 @@
 		panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
 
 	if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+	    IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
 	    !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
 		panic("llock/scond livelock workaround missing\n");
 }
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index dfad287..156d983 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -62,7 +62,7 @@
 
 /********** Clock Source Device *********/
 
-#ifdef CONFIG_ARC_HAS_GRTC
+#ifdef CONFIG_ARC_HAS_GFRC
 
 static int arc_counter_setup(void)
 {
@@ -83,10 +83,10 @@
 
 	local_irq_save(flags);
 
-	__mcip_cmd(CMD_GRTC_READ_LO, 0);
+	__mcip_cmd(CMD_GFRC_READ_LO, 0);
 	stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
 
-	__mcip_cmd(CMD_GRTC_READ_HI, 0);
+	__mcip_cmd(CMD_GFRC_READ_HI, 0);
 	stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
 
 	local_irq_restore(flags);
@@ -95,7 +95,7 @@
 }
 
 static struct clocksource arc_counter = {
-	.name   = "ARConnect GRTC",
+	.name   = "ARConnect GFRC",
 	.rating = 400,
 	.read   = arc_counter_read,
 	.mask   = CLOCKSOURCE_MASK(64),
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 04885f9..1fafaad 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -439,6 +439,7 @@
 			ti,mbox-num-users = <4>;
 			ti,mbox-num-fifos = <8>;
 			mbox_wkupm3: wkup_m3 {
+				ti,mbox-send-noirq;
 				ti,mbox-tx = <0 0 0>;
 				ti,mbox-rx = <0 0 3>;
 			};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index df955ba..92068fb 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -73,7 +73,7 @@
 	global_timer: timer@48240200 {
 		compatible = "arm,cortex-a9-global-timer";
 		reg = <0x48240200 0x100>;
-		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 		interrupt-parent = <&gic>;
 		clocks = <&mpu_periphclk>;
 	};
@@ -81,7 +81,7 @@
 	local_timer: timer@48240600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0x48240600 0x100>;
-		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 		interrupt-parent = <&gic>;
 		clocks = <&mpu_periphclk>;
 	};
@@ -290,6 +290,7 @@
 			ti,mbox-num-users = <4>;
 			ti,mbox-num-fifos = <8>;
 			mbox_wkupm3: wkup_m3 {
+				ti,mbox-send-noirq;
 				ti,mbox-tx = <0 0 0>;
 				ti,mbox-rx = <0 0 3>;
 			};
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 64d4332..ecd09ab 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -590,8 +590,6 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&pixcir_ts_pins>;
 		reg = <0x5c>;
-		interrupt-parent = <&gpio3>;
-		interrupts = <22 0>;
 
 		attb-gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
 
@@ -599,7 +597,7 @@
 		 * 0x264 represents the offset of padconf register of
 		 * gpio3_22 from am43xx_pinmux base.
 		 */
-		interrupts-extended = <&gpio3 22 IRQ_TYPE_NONE>,
+		interrupts-extended = <&gpio3 22 IRQ_TYPE_EDGE_FALLING>,
 				      <&am43xx_pinmux 0x264>;
 		interrupt-names = "tsc", "wakeup";
 
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 746fd2b..d580e2b7 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -491,7 +491,7 @@
 		pinctrl-0 = <&pixcir_ts_pins>;
 		reg = <0x5c>;
 		interrupt-parent = <&gpio1>;
-		interrupts = <17 0>;
+		interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
 
 		attb-gpio = <&gpio1 17 GPIO_ACTIVE_HIGH>;
 
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index c538826..8d93882 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -167,7 +167,7 @@
 			DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d20.rgmii1_rd3 */
 			DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d21.rgmii1_rd2 */
 			DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d22.rgmii1_rd1 */
-			DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLUP | MUX_MODE3)	/* vin2a_d23.rgmii1_rd0 */
+			DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d23.rgmii1_rd0 */
 		>;
 	};
 
@@ -492,14 +492,14 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&qspi1_pins>;
 
-	spi-max-frequency = <20000000>;
+	spi-max-frequency = <48000000>;
 
 	spi_flash: spi_flash@0 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "spansion,m25p80", "jedec,spi-nor";
 		reg = <0>;				/* CS0 */
-		spi-max-frequency = <20000000>;
+		spi-max-frequency = <48000000>;
 
 		partition@0 {
 			label = "uboot";
@@ -559,13 +559,13 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-txid";
 	dual_emac_res_vlan = <0>;
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-txid";
 	dual_emac_res_vlan = <1>;
 };
 
@@ -588,7 +588,7 @@
 };
 
 &usb2 {
-	dr_mode = "peripheral";
+	dr_mode = "host";
 };
 
 &mcasp3 {
diff --git a/arch/arm/boot/dts/am57xx-sbc-am57x.dts b/arch/arm/boot/dts/am57xx-sbc-am57x.dts
index 77bb8e1..988e996 100644
--- a/arch/arm/boot/dts/am57xx-sbc-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-sbc-am57x.dts
@@ -25,8 +25,8 @@
 &dra7_pmx_core {
 	uart3_pins_default: uart3_pins_default {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_SLEW | MUX_MODE2)	/* uart2_ctsn.uart3_rxd */
-			DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_SLEW | MUX_MODE1)	/* uart2_rtsn.uart3_txd */
+			DRA7XX_CORE_IOPAD(0x3648, PIN_INPUT_SLEW | MUX_MODE0)	/* uart3_rxd */
+			DRA7XX_CORE_IOPAD(0x364c, PIN_INPUT_SLEW | MUX_MODE0)	/* uart3_txd */
 		>;
 	};
 
@@ -108,9 +108,9 @@
 	pinctrl-0 = <&i2c5_pins_default>;
 	clock-frequency = <400000>;
 
-	eeprom_base: atmel@50 {
+	eeprom_base: atmel@54 {
 		compatible = "atmel,24c08";
-		reg = <0x50>;
+		reg = <0x54>;
 		pagesize = <16>;
 	};
 
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index 13cf69a..fb9e1bb 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -152,6 +152,7 @@
 				nand-on-flash-bbt;
 
 				partitions {
+					compatible = "fixed-partitions";
 					#address-cells = <1>;
 					#size-cells = <1>;
 
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 77ddff0..e683856 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -114,9 +114,15 @@
 
 			macb0: ethernet@f8008000 {
 				pinctrl-names = "default";
-				pinctrl-0 = <&pinctrl_macb0_default>;
+				pinctrl-0 = <&pinctrl_macb0_default &pinctrl_macb0_phy_irq>;
 				phy-mode = "rmii";
 				status = "okay";
+
+				ethernet-phy@1 {
+					reg = <0x1>;
+					interrupt-parent = <&pioA>;
+					interrupts = <73 IRQ_TYPE_LEVEL_LOW>;
+				};
 			};
 
 			pdmic@f8018000 {
@@ -300,6 +306,10 @@
 					bias-disable;
 				};
 
+				pinctrl_macb0_phy_irq: macb0_phy_irq {
+					pinmux = <PIN_PC9__GPIO>;
+				};
+
 				pinctrl_pdmic_default: pdmic_default {
 					pinmux = <PIN_PB26__PDMIC_DAT>,
 						<PIN_PB27__PDMIC_CLK>;
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 131614f..569026e 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -86,10 +86,12 @@
 			macb0: ethernet@f8020000 {
 				phy-mode = "rmii";
 				status = "okay";
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
 
 				phy0: ethernet-phy@1 {
 					interrupt-parent = <&pioE>;
-					interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+					interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
 					reg = <1>;
 				};
 			};
@@ -152,6 +154,10 @@
 						atmel,pins =
 							<AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
 					};
+					pinctrl_macb0_phy_irq: macb0_phy_irq_0 {
+						atmel,pins =
+							<AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+					};
 				};
 			};
 		};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 2d4a3310..4e98cda 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -160,8 +160,15 @@
 			};
 
 			macb0: ethernet@f8020000 {
+				pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
 				phy-mode = "rmii";
 				status = "okay";
+
+				ethernet-phy@1 {
+					reg = <0x1>;
+					interrupt-parent = <&pioE>;
+					interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+				};
 			};
 
 			mmc1: mmc@fc000000 {
@@ -193,6 +200,10 @@
 
 			pinctrl@fc06a000 {
 				board {
+					pinctrl_macb0_phy_irq: macb0_phy_irq {
+						atmel,pins =
+							<AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+					};
 					pinctrl_mmc0_cd: mmc0_cd {
 						atmel,pins =
 							<AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index ca4ddf8..626c67d 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -215,7 +215,7 @@
 	};
 
 	panel: panel {
-		compatible = "qd,qd43003c0-40", "simple-panel";
+		compatible = "qiaodian,qd43003c0-40", "simple-panel";
 		backlight = <&backlight>;
 		power-supply = <&panel_reg>;
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/kirkwood-lswvl.dts b/arch/arm/boot/dts/kirkwood-lswvl.dts
index 09eed3cea..36eec73 100644
--- a/arch/arm/boot/dts/kirkwood-lswvl.dts
+++ b/arch/arm/boot/dts/kirkwood-lswvl.dts
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Buffalo Linkstation LS-WVL/VL
  *
- * Copyright (C) 2015, rogershimizu@gmail.com
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -156,21 +157,21 @@
 		button@1 {
 			label = "Function Button";
 			linux,code = <KEY_OPTION>;
-			gpios = <&gpio0 45 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
 		};
 
 		button@2 {
 			label = "Power-on Switch";
 			linux,code = <KEY_RESERVED>;
 			linux,input-type = <5>;
-			gpios = <&gpio0 46 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
 		};
 
 		button@3 {
 			label = "Power-auto Switch";
 			linux,code = <KEY_ESC>;
 			linux,input-type = <5>;
-			gpios = <&gpio0 47 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
 		};
 	};
 
@@ -185,38 +186,38 @@
 
 		led@1 {
 			label = "lswvl:red:alarm";
-			gpios = <&gpio0 36 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
 		};
 
 		led@2 {
 			label = "lswvl:red:func";
-			gpios = <&gpio0 37 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
 		};
 
 		led@3 {
 			label = "lswvl:amber:info";
-			gpios = <&gpio0 38 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
 		};
 
 		led@4 {
 			label = "lswvl:blue:func";
-			gpios = <&gpio0 39 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
 		};
 
 		led@5 {
 			label = "lswvl:blue:power";
-			gpios = <&gpio0 40 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
 			default-state = "keep";
 		};
 
 		led@6 {
 			label = "lswvl:red:hdderr0";
-			gpios = <&gpio0 34 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 		};
 
 		led@7 {
 			label = "lswvl:red:hdderr1";
-			gpios = <&gpio0 35 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
 		};
 	};
 
@@ -233,7 +234,7 @@
 				3250 1
 				5000 0>;
 
-		alarm-gpios = <&gpio0 43 GPIO_ACTIVE_HIGH>;
+		alarm-gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
 	};
 
 	restart_poweroff {
diff --git a/arch/arm/boot/dts/kirkwood-lswxl.dts b/arch/arm/boot/dts/kirkwood-lswxl.dts
index f5db16a..b13ec20 100644
--- a/arch/arm/boot/dts/kirkwood-lswxl.dts
+++ b/arch/arm/boot/dts/kirkwood-lswxl.dts
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Buffalo Linkstation LS-WXL/WSXL
  *
- * Copyright (C) 2015, rogershimizu@gmail.com
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -156,21 +157,21 @@
 		button@1 {
 			label = "Function Button";
 			linux,code = <KEY_OPTION>;
-			gpios = <&gpio1 41 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
 		};
 
 		button@2 {
 			label = "Power-on Switch";
 			linux,code = <KEY_RESERVED>;
 			linux,input-type = <5>;
-			gpios = <&gpio1 42 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
 		};
 
 		button@3 {
 			label = "Power-auto Switch";
 			linux,code = <KEY_ESC>;
 			linux,input-type = <5>;
-			gpios = <&gpio1 43 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
 		};
 	};
 
@@ -185,12 +186,12 @@
 
 		led@1 {
 			label = "lswxl:blue:func";
-			gpios = <&gpio1 36 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 		};
 
 		led@2 {
 			label = "lswxl:red:alarm";
-			gpios = <&gpio1 49 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
 		};
 
 		led@3 {
@@ -200,23 +201,23 @@
 
 		led@4 {
 			label = "lswxl:blue:power";
-			gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+			default-state = "keep";
 		};
 
 		led@5 {
 			label = "lswxl:red:func";
-			gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
-			default-state = "keep";
+			gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 		};
 
 		led@6 {
 			label = "lswxl:red:hdderr0";
-			gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
 		};
 
 		led@7 {
 			label = "lswxl:red:hdderr1";
-			gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
 		};
 	};
 
@@ -225,15 +226,15 @@
 		pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
 		pinctrl-names = "default";
 
-		gpios = <&gpio0 47 GPIO_ACTIVE_LOW
-			 &gpio0 48 GPIO_ACTIVE_LOW>;
+		gpios = <&gpio1 16 GPIO_ACTIVE_LOW
+			 &gpio1 15 GPIO_ACTIVE_LOW>;
 
 		gpio-fan,speed-map = <0 3
 				1500 2
 				3250 1
 				5000 0>;
 
-		alarm-gpios = <&gpio1 49 GPIO_ACTIVE_HIGH>;
+		alarm-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
 	};
 
 	restart_poweroff {
@@ -256,7 +257,7 @@
 			enable-active-high;
 			regulator-always-on;
 			regulator-boot-on;
-			gpio = <&gpio0 37 GPIO_ACTIVE_HIGH>;
+			gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
 		};
 		hdd_power0: regulator@2 {
 			compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts b/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
index 1db6f2c..8082d64 100644
--- a/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
+++ b/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
@@ -131,6 +131,7 @@
 	chip-delay = <40>;
 	status = "okay";
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <1>;
 		#size-cells = <1>;
 
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 7fed0bd..0080532 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -112,14 +112,6 @@
 	clock-frequency = <400000>;
 };
 
-&i2c2 {
-	clock-frequency = <400000>;
-};
-
-&i2c3 {
-	clock-frequency = <400000>;
-};
-
 /*
  * Only found on the wireless SOM. For the SOM without wireless, the pins for
  * MMC3 can be routed with jumpers to the second MMC slot on the devkit and
@@ -143,6 +135,7 @@
 		interrupt-parent = <&gpio5>;
 		interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
 		ref-clock-frequency = <26000000>;
+		tcxo-clock-frequency = <26000000>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 888412c..902657d 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -130,6 +130,16 @@
 	};
 };
 
+&gpio8 {
+	/* TI trees use GPIO instead of msecure, see also muxing */
+	p234 {
+		gpio-hog;
+		gpios = <10 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "gpio8_234/msecure";
+	};
+};
+
 &omap5_pmx_core {
 	pinctrl-names = "default";
 	pinctrl-0 = <
@@ -213,6 +223,13 @@
 		>;
 	};
 
+	/* TI trees use GPIO mode; msecure mode does not work reliably? */
+	palmas_msecure_pins: palmas_msecure_pins {
+		pinctrl-single,pins = <
+			OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */
+		>;
+	};
+
 	usbhost_pins: pinmux_usbhost_pins {
 		pinctrl-single,pins = <
 			OMAP5_IOPAD(0x0c4, PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */
@@ -278,6 +295,12 @@
 			&usbhost_wkup_pins
 	>;
 
+	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+		pinctrl-single,pins = <
+			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+		>;
+	};
+
 	usbhost_wkup_pins: pinmux_usbhost_wkup_pins {
 		pinctrl-single,pins = <
 			OMAP5_IOPAD(0x05a, PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
@@ -345,6 +368,8 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,system-power-controller;
+		pinctrl-names = "default";
+		pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;
 
 		extcon_usb3: palmas_usb {
 			compatible = "ti,palmas-usb-vid";
@@ -358,6 +383,14 @@
 			#clock-cells = <0>;
 		};
 
+		rtc {
+			compatible = "ti,palmas-rtc";
+			interrupt-parent = <&palmas>;
+			interrupts = <8 IRQ_TYPE_NONE>;
+			ti,backup-battery-chargeable;
+			ti,backup-battery-charge-high-current;
+		};
+
 		palmas_pmic {
 			compatible = "ti,palmas-pmic";
 			interrupt-parent = <&palmas>;
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
index 3daec91..4207882 100644
--- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
+++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Buffalo Linkstation LS-WTGL
  *
- * Copyright (C) 2015, Roger Shimizu <rogershimizu@gmail.com>
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
@@ -69,8 +70,6 @@
 
 		internal-regs {
 			pinctrl: pinctrl@10000 {
-				pinctrl-0 = <&pmx_usb_power &pmx_power_hdd
-					&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
 				pinctrl-names = "default";
 
 				pmx_led_power: pmx-leds {
@@ -162,6 +161,7 @@
 		led@1 {
 			label = "lswtgl:blue:power";
 			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+			default-state = "keep";
 		};
 
 		led@2 {
@@ -188,7 +188,7 @@
 				3250 1
 				5000 0>;
 
-		alarm-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+		alarm-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
 	};
 
 	restart_poweroff {
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index b8032bc..db1151c 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1342,7 +1342,7 @@
 			dbgu: serial@fc069000 {
 				compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
 				reg = <0xfc069000 0x200>;
-				interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
+				interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
 				pinctrl-names = "default";
 				pinctrl-0 = <&pinctrl_dbgu>;
 				clocks = <&dbgu_clk>;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d0c7438..27a333e 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -127,22 +127,14 @@
 			};
 			mmcsd_default_mode: mmcsd_default {
 				mmcsd_default_cfg1 {
-					/* MCCLK */
-					pins = "GPIO8_B10";
-					ste,output = <0>;
-				};
-				mmcsd_default_cfg2 {
-					/* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
-					pins = "GPIO10_C11", "GPIO15_A12",
-					"GPIO16_C13", "GPIO23_D15";
-					ste,output = <1>;
-				};
-				mmcsd_default_cfg3 {
-					/* MCCMD, MCDAT3-0, MCMSFBCLK */
-					pins = "GPIO9_A10", "GPIO11_B11",
-					"GPIO12_A11", "GPIO13_C12",
-					"GPIO14_B12", "GPIO24_C15";
-					ste,input = <1>;
+					/*
+					 * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
+					 * MCCMD, MCDAT3-0, MCMSFBCLK
+					 */
+					pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
+					       "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
+					       "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
+					ste,output = <2>;
 				};
 			};
 		};
@@ -802,10 +794,21 @@
 			clock-names = "mclk", "apb_pclk";
 			interrupt-parent = <&vica>;
 			interrupts = <22>;
-			max-frequency = <48000000>;
+			max-frequency = <400000>;
 			bus-width = <4>;
 			cap-mmc-highspeed;
 			cap-sd-highspeed;
+			full-pwr-cycle;
+			/*
+			 * The STw4811 circuit used with the Nomadik strictly
+			 * requires that all of these signal direction pins be
+			 * routed and used for its 4-bit levelshifter.
+			 */
+			st,sig-dir-dat0;
+			st,sig-dir-dat2;
+			st,sig-dir-dat31;
+			st,sig-dir-cmd;
+			st,sig-pin-fbclk;
 			pinctrl-names = "default";
 			pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
 			vmmc-supply = <&vmmc_regulator>;
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
index 2dc6da70..d7ed252 100644
--- a/arch/arm/common/icst.c
+++ b/arch/arm/common/icst.c
@@ -16,7 +16,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
-
+#include <asm/div64.h>
 #include <asm/hardware/icst.h>
 
 /*
@@ -29,7 +29,11 @@
 
 unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
 {
-	return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+	u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+	u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+	do_div(dividend, divisor);
+	return (unsigned long)dividend;
 }
 
 EXPORT_SYMBOL(icst_hz);
@@ -58,6 +62,7 @@
 
 		if (f > p->vco_min && f <= p->vco_max)
 			break;
+		i++;
 	} while (i < 8);
 
 	if (i >= 8)
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 314f6be..8e8b2ac 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -426,6 +426,7 @@
 CONFIG_IMX2_WDT=y
 CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c5e1943..d18d6b4 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -50,6 +50,7 @@
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
 CONFIG_ARM_THUMBEE=y
+CONFIG_ARM_KERNMEM_PERMS=y
 CONFIG_ARM_ERRATA_411920=y
 CONFIG_ARM_ERRATA_430973=y
 CONFIG_SMP=y
@@ -177,6 +178,7 @@
 CONFIG_AT803X_PHY=y
 CONFIG_SMSC_PHY=y
 CONFIG_USB_USBNET=m
+CONFIG_USB_NET_SMSC75XX=m
 CONFIG_USB_NET_SMSC95XX=m
 CONFIG_USB_ALI_M5632=y
 CONFIG_USB_AN2720=y
@@ -290,24 +292,23 @@
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
-CONFIG_OMAP2_DSS=m
-CONFIG_OMAP5_DSS_HDMI=y
-CONFIG_OMAP2_DSS_SDI=y
-CONFIG_OMAP2_DSS_DSI=y
+CONFIG_FB_OMAP5_DSS_HDMI=y
+CONFIG_FB_OMAP2_DSS_SDI=y
+CONFIG_FB_OMAP2_DSS_DSI=y
 CONFIG_FB_OMAP2=m
-CONFIG_DISPLAY_ENCODER_TFP410=m
-CONFIG_DISPLAY_ENCODER_TPD12S015=m
-CONFIG_DISPLAY_CONNECTOR_DVI=m
-CONFIG_DISPLAY_CONNECTOR_HDMI=m
-CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m
-CONFIG_DISPLAY_PANEL_DPI=m
-CONFIG_DISPLAY_PANEL_DSI_CM=m
-CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m
-CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m
-CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m
-CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m
+CONFIG_FB_OMAP2_ENCODER_TFP410=m
+CONFIG_FB_OMAP2_ENCODER_TPD12S015=m
+CONFIG_FB_OMAP2_CONNECTOR_DVI=m
+CONFIG_FB_OMAP2_CONNECTOR_HDMI=m
+CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m
+CONFIG_FB_OMAP2_PANEL_DPI=m
+CONFIG_FB_OMAP2_PANEL_DSI_CM=m
+CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m
+CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m
+CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m
+CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=y
@@ -354,6 +355,11 @@
 CONFIG_USB_INVENTRA_DMA=y
 CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_DWC3=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_TEST=m
 CONFIG_AM335X_PHY_USB=y
 CONFIG_USB_GADGET=m
@@ -387,6 +393,7 @@
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_PWM=m
+CONFIG_LEDS_PCA963X=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_ONESHOT=m
@@ -449,6 +456,8 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_SPLIT=y
+CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b445a5d..89a3a3e 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -364,7 +364,7 @@
 	.cra_blkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= ce_aes_setkey,
 		.encrypt	= ecb_encrypt,
 		.decrypt	= ecb_decrypt,
@@ -441,7 +441,7 @@
 	.cra_ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= ablk_set_key,
 		.encrypt	= ablk_encrypt,
 		.decrypt	= ablk_decrypt,
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 9cda974..d7f1d69 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -18,7 +18,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/pinctrl/machine.h>
-#include <linux/platform_data/mailbox-omap.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -66,32 +65,6 @@
 }
 omap_postcore_initcall(omap3_l3_init);
 
-#if defined(CONFIG_OMAP2PLUS_MBOX) || defined(CONFIG_OMAP2PLUS_MBOX_MODULE)
-static inline void __init omap_init_mbox(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-	struct omap_mbox_pdata *pdata;
-
-	oh = omap_hwmod_lookup("mailbox");
-	if (!oh) {
-		pr_err("%s: unable to find hwmod\n", __func__);
-		return;
-	}
-	if (!oh->dev_attr) {
-		pr_err("%s: hwmod doesn't have valid attrs\n", __func__);
-		return;
-	}
-
-	pdata = (struct omap_mbox_pdata *)oh->dev_attr;
-	pdev = omap_device_build("omap-mailbox", -1, oh, pdata, sizeof(*pdata));
-	WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n",
-						__func__, PTR_ERR(pdev));
-}
-#else
-static inline void omap_init_mbox(void) { }
-#endif /* CONFIG_OMAP2PLUS_MBOX */
-
 static inline void omap_init_sti(void) {}
 
 #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
@@ -229,7 +202,6 @@
 		 * please keep these calls, and their implementations above,
 		 * in alphabetical order so they're easier to sort through.
 		 */
-		omap_init_mbox();
 		omap_init_mcspi();
 		omap_init_sham();
 		omap_init_aes();
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index e781e4f..a935d28 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -23,6 +23,8 @@
 #include <linux/platform_data/pinctrl-single.h>
 #include <linux/platform_data/iommu-omap.h>
 #include <linux/platform_data/wkup_m3.h>
+#include <linux/platform_data/pwm_omap_dmtimer.h>
+#include <plat/dmtimer.h>
 
 #include "common.h"
 #include "common-board-devices.h"
@@ -449,6 +451,24 @@
 	dev->platform_data = &twl_gpio_auxdata;
 }
 
+/* Dual mode timer PWM callbacks platdata */
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+struct pwm_omap_dmtimer_pdata pwm_dmtimer_pdata = {
+	.request_by_node = omap_dm_timer_request_by_node,
+	.free = omap_dm_timer_free,
+	.enable = omap_dm_timer_enable,
+	.disable = omap_dm_timer_disable,
+	.get_fclk = omap_dm_timer_get_fclk,
+	.start = omap_dm_timer_start,
+	.stop = omap_dm_timer_stop,
+	.set_load = omap_dm_timer_set_load,
+	.set_match = omap_dm_timer_set_match,
+	.set_pwm = omap_dm_timer_set_pwm,
+	.set_prescaler = omap_dm_timer_set_prescaler,
+	.write_counter = omap_dm_timer_write_counter,
+};
+#endif
+
 /*
  * Few boards still need auxdata populated before we populate
  * the dev entries in of_platform_populate().
@@ -502,6 +522,9 @@
 	OF_DEV_AUXDATA("ti,am4372-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
 		       &wkup_m3_data),
 #endif
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+	OF_DEV_AUXDATA("ti,omap-dmtimer-pwm", 0, NULL, &pwm_dmtimer_pdata),
+#endif
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
 	OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu",
 		       &omap4_iommu_pdata),
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index eafd120..1b9f052 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -86,13 +86,18 @@
 	stmfd	sp!, {lr}	@ save registers on stack
 	/* Setup so that we will disable and enable l2 */
 	mov	r1, #0x1
-	adrl	r2, l2dis_3630	@ may be too distant for plain adr
-	str	r1, [r2]
+	adrl	r3, l2dis_3630_offset	@ may be too distant for plain adr
+	ldr	r2, [r3]		@ value for offset
+	str	r1, [r2, r3]		@ write to l2dis_3630
 	ldmfd	sp!, {pc}	@ restore regs and return
 ENDPROC(enable_omap3630_toggle_l2_on_restore)
 
-	.text
-/* Function to call rom code to save secure ram context */
+/*
+ * Function to call rom code to save secure ram context. This gets
+ * relocated to SRAM, so it can all live in the .data section.
+ * Otherwise we would need to initialize api_params separately.
+ */
+	.data
 	.align	3
 ENTRY(save_secure_ram_context)
 	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
@@ -126,6 +131,8 @@
 ENTRY(save_secure_ram_context_sz)
 	.word	. - save_secure_ram_context
 
+	.text
+
 /*
  * ======================
  * == Idle entry point ==
@@ -289,12 +296,6 @@
 	bic	r5, r5, #0x40
 	str	r5, [r4]
 
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
 is_dll_in_lock_mode:
 	/* Is dll in lock mode? */
 	ldr	r4, sdrc_dlla_ctrl
@@ -302,11 +303,7 @@
 	tst	r5, #0x4
 	bne	exit_nonoff_modes	@ Return if locked
 	/* wait till dll locks */
-	adr	r7, kick_counter
 wait_dll_lock_timed:
-	ldr	r4, wait_dll_lock_counter
-	add	r4, r4, #1
-	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
 	ldr	r4, sdrc_dlla_status
 	/* Wait 20uS for lock */
 	mov	r6, #8
@@ -330,9 +327,6 @@
 	orr	r6, r6, #(1<<3)		@ enable dll
 	str	r6, [r4]
 	dsb
-	ldr	r4, kick_counter
-	add	r4, r4, #1
-	str	r4, [r7]		@ kick_counter
 	b	wait_dll_lock_timed
 
 exit_nonoff_modes:
@@ -360,15 +354,6 @@
 	.word	SDRC_DLLA_STATUS_V
 sdrc_dlla_ctrl:
 	.word	SDRC_DLLA_CTRL_V
-	/*
-	 * When exporting to userspace while the counters are in SRAM,
-	 * these 2 words need to be at the end to facilitate retrival!
-	 */
-kick_counter:
-	.word	0
-wait_dll_lock_counter:
-	.word	0
-
 ENTRY(omap3_do_wfi_sz)
 	.word	. - omap3_do_wfi
 
@@ -437,7 +422,9 @@
 	cmp	r2, #0x0	@ Check if target power state was OFF or RET
 	bne	logic_l1_restore
 
-	ldr	r0, l2dis_3630
+	adr	r1, l2dis_3630_offset	@ address for offset
+	ldr	r0, [r1]		@ value for offset
+	ldr	r0, [r1, r0]		@ value at l2dis_3630
 	cmp	r0, #0x1	@ should we disable L2 on 3630?
 	bne	skipl2dis
 	mrc	p15, 0, r0, c1, c0, 1
@@ -449,12 +436,14 @@
 	and	r1, #0x700
 	cmp	r1, #0x300
 	beq	l2_inv_gp
+	adr	r0, l2_inv_api_params_offset
+	ldr	r3, [r0]
+	add	r3, r3, r0		@ r3 points to dummy parameters
 	mov	r0, #40			@ set service ID for PPA
 	mov	r12, r0			@ copy secure Service ID in r12
 	mov	r1, #0			@ set task id for ROM code in r1
 	mov	r2, #4			@ set some flags in r2, r6
 	mov	r6, #0xff
-	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
 	dsb				@ data write barrier
 	dmb				@ data memory barrier
 	smc	#1			@ call SMI monitor (smi #1)
@@ -488,8 +477,8 @@
 	b	logic_l1_restore
 
 	.align
-l2_inv_api_params:
-	.word	0x1, 0x00
+l2_inv_api_params_offset:
+	.long	l2_inv_api_params - .
 l2_inv_gp:
 	/* Execute smi to invalidate L2 cache */
 	mov r12, #0x1			@ set up to invalidate L2
@@ -506,7 +495,9 @@
 	mov	r12, #0x2
 	smc	#0			@ Call SMI monitor (smieq)
 logic_l1_restore:
-	ldr	r1, l2dis_3630
+	adr	r0, l2dis_3630_offset	@ address for offset
+	ldr	r1, [r0]		@ value for offset
+	ldr	r1, [r0, r1]		@ value at l2dis_3630
 	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
 	bne	skipl2reen
 	mrc	p15, 0, r1, c1, c0, 1
@@ -535,9 +526,17 @@
 	.word	CONTROL_STAT
 control_mem_rta:
 	.word	CONTROL_MEM_RTA_CTRL
+l2dis_3630_offset:
+	.long	l2dis_3630 - .
+
+	.data
 l2dis_3630:
 	.word	0
 
+	.data
+l2_inv_api_params:
+	.word	0x1, 0x00
+
 /*
  * Internal functions
  */
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9b09d85..c7a3b4a 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -29,12 +29,6 @@
 	dsb
 .endm
 
-ppa_zero_params:
-	.word		0x0
-
-ppa_por_params:
-	.word		1, 0
-
 #ifdef CONFIG_ARCH_OMAP4
 
 /*
@@ -266,7 +260,9 @@
 	beq	skip_ns_smp_enable
 ppa_actrl_retry:
 	mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
-	adr	r3, ppa_zero_params		@ Pointer to parameters
+	adr	r1, ppa_zero_params_offset
+	ldr	r3, [r1]
+	add	r3, r3, r1			@ Pointer to ppa_zero_params
 	mov	r1, #0x0			@ Process ID
 	mov	r2, #0x4			@ Flag
 	mov	r6, #0xff
@@ -303,7 +299,9 @@
 	ldr     r0, =OMAP4_PPA_L2_POR_INDEX
 	ldr     r1, =OMAP44XX_SAR_RAM_BASE
 	ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
-	adr     r3, ppa_por_params
+	adr     r1, ppa_por_params_offset
+	ldr	r3, [r1]
+	add	r3, r3, r1			@ Pointer to ppa_por_params
 	str     r4, [r3, #0x04]
 	mov	r1, #0x0			@ Process ID
 	mov	r2, #0x4			@ Flag
@@ -328,6 +326,8 @@
 #endif
 
 	b	cpu_resume			@ Jump to generic resume
+ppa_por_params_offset:
+	.long	ppa_por_params - .
 ENDPROC(omap4_cpu_resume)
 #endif	/* CONFIG_ARCH_OMAP4 */
 
@@ -380,4 +380,13 @@
 	nop
 
 	ldmfd	sp!, {pc}
+ppa_zero_params_offset:
+	.long	ppa_zero_params - .
 ENDPROC(omap_do_wfi)
+
+	.data
+ppa_zero_params:
+	.word		0
+
+ppa_por_params:
+	.word		1, 0
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index def40a0..70ab4a2 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -1,5 +1,6 @@
 menuconfig ARCH_REALVIEW
-	bool "ARM Ltd. RealView family" if ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7
+	bool "ARM Ltd. RealView family"
+	depends on ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7
 	select ARM_AMBA
 	select ARM_TIMER_SP804
 	select COMMON_CLK_VERSATILE
diff --git a/arch/arm/mach-realview/platsmp-dt.c b/arch/arm/mach-realview/platsmp-dt.c
index 6558539..6964e88 100644
--- a/arch/arm/mach-realview/platsmp-dt.c
+++ b/arch/arm/mach-realview/platsmp-dt.c
@@ -80,7 +80,7 @@
 		     virt_to_phys(versatile_secondary_startup));
 }
 
-struct smp_operations realview_dt_smp_ops __initdata = {
+static const struct smp_operations realview_dt_smp_ops __initconst = {
 	.smp_prepare_cpus	= realview_smp_prepare_cpus,
 	.smp_secondary_init	= versatile_secondary_init,
 	.smp_boot_secondary	= versatile_boot_secondary,
diff --git a/arch/arm/mach-tango/Kconfig b/arch/arm/mach-tango/Kconfig
index d6a3714..ebe15b9 100644
--- a/arch/arm/mach-tango/Kconfig
+++ b/arch/arm/mach-tango/Kconfig
@@ -1,5 +1,6 @@
 config ARCH_TANGO
-	bool "Sigma Designs Tango4 (SMP87xx)" if ARCH_MULTI_V7
+	bool "Sigma Designs Tango4 (SMP87xx)"
+	depends on ARCH_MULTI_V7
 	# Cortex-A9 MPCore r3p0, PL310 r3p2
 	select ARCH_HAS_HOLES_MEMORYMODEL
 	select ARM_ERRATA_754322
diff --git a/arch/arm/mach-tango/platsmp.c b/arch/arm/mach-tango/platsmp.c
index a18d5a3..a21f55e 100644
--- a/arch/arm/mach-tango/platsmp.c
+++ b/arch/arm/mach-tango/platsmp.c
@@ -9,7 +9,7 @@
 	return 0;
 }
 
-static struct smp_operations tango_smp_ops __initdata = {
+static const struct smp_operations tango_smp_ops __initconst = {
 	.smp_boot_secondary	= tango_boot_secondary,
 };
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 307237c..b5e3f6d 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -88,7 +88,7 @@
 Image.%: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
-zinstall install: vmlinux
+zinstall install:
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
 %.dtb: scripts
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index abcbba2..305c552 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -34,10 +34,10 @@
 $(obj)/Image.lzo: $(obj)/Image FORCE
 	$(call if_changed,lzo)
 
-install: $(obj)/Image
+install:
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
-zinstall: $(obj)/Image.gz
+zinstall:
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index dd5158e..e5b59ca 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -115,6 +115,7 @@
 			     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
index 7dfe1c0..62f33fc 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
@@ -12,6 +12,8 @@
 		rtc1 = "/rtc@0,7000e000";
 	};
 
+	chosen { };
+
 	memory {
 		device_type = "memory";
 		reg = <0x0 0x80000000 0x0 0x80000000>;
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
index 12ed78a..d91e1f0 100644
--- a/arch/arm64/boot/install.sh
+++ b/arch/arm64/boot/install.sh
@@ -20,6 +20,20 @@
 #   $4 - default install path (blank if root directory)
 #
 
+verify () {
+	if [ ! -f "$1" ]; then
+		echo ""                                                   1>&2
+		echo " *** Missing file: $1"                              1>&2
+		echo ' *** You need to run "make" before "make install".' 1>&2
+		echo ""                                                   1>&2
+		exit 1
+	fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
 if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 05d9e16..7a3d22a 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -294,7 +294,7 @@
 	.cra_blkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= aes_setkey,
 		.encrypt	= ecb_encrypt,
 		.decrypt	= ecb_decrypt,
@@ -371,7 +371,7 @@
 	.cra_ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= ablk_set_key,
 		.encrypt	= ablk_encrypt,
 		.decrypt	= ablk_decrypt,
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 2731d3b..8ec88e5 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -103,6 +103,7 @@
 	u64 irqstat;
 
 	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+	dsb(sy);
 	return irqstat;
 }
 
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 007a69f..5f3ab8c 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -121,6 +121,7 @@
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
@@ -137,6 +138,7 @@
 "	.align	3\n"
 "	.quad	1b, 4b, 2b, 4b\n"
 "	.popsection\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
 	: "memory");
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 738a95f..d201d4b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,8 +107,6 @@
 #define TCR_EL2_MASK	(TCR_EL2_TG0 | TCR_EL2_SH0 | \
 			 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
 
-#define TCR_EL2_FLAGS	(TCR_EL2_RES1 | TCR_EL2_PS_40B)
-
 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_RES1		(1 << 31)
 #define VTCR_EL2_PS_MASK	(7 << 16)
@@ -182,6 +180,7 @@
 #define CPTR_EL2_TCPAC	(1 << 31)
 #define CPTR_EL2_TTA	(1 << 20)
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT	0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA		(1 << 11)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3066328..779a587 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -127,10 +127,14 @@
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+	u32 mode;
 
-	if (vcpu_mode_is_32bit(vcpu))
+	if (vcpu_mode_is_32bit(vcpu)) {
+		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
 		return mode > COMPAT_PSR_MODE_USR;
+	}
+
+	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
 	return mode != PSR_MODE_EL0t;
 }
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 9b2f5a9..ae615b9 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -39,6 +39,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
 #include <asm/pgtable-types.h>
 
 extern void __cpu_clear_user_page(void *p, unsigned long user);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 8aee3ae..c536c9e 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,11 +226,28 @@
 	return retval;
 }
 
+static void send_user_sigtrap(int si_code)
+{
+	struct pt_regs *regs = current_pt_regs();
+	siginfo_t info = {
+		.si_signo	= SIGTRAP,
+		.si_errno	= 0,
+		.si_code	= si_code,
+		.si_addr	= (void __user *)instruction_pointer(regs),
+	};
+
+	if (WARN_ON(!user_mode(regs)))
+		return;
+
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
+	force_sig_info(SIGTRAP, &info, current);
+}
+
 static int single_step_handler(unsigned long addr, unsigned int esr,
 			       struct pt_regs *regs)
 {
-	siginfo_t info;
-
 	/*
 	 * If we are stepping a pending breakpoint, call the hw_breakpoint
 	 * handler first.
@@ -239,11 +256,7 @@
 		return 0;
 
 	if (user_mode(regs)) {
-		info.si_signo = SIGTRAP;
-		info.si_errno = 0;
-		info.si_code  = TRAP_HWBKPT;
-		info.si_addr  = (void __user *)instruction_pointer(regs);
-		force_sig_info(SIGTRAP, &info, current);
+		send_user_sigtrap(TRAP_HWBKPT);
 
 		/*
 		 * ptrace will disable single step unless explicitly
@@ -307,17 +320,8 @@
 static int brk_handler(unsigned long addr, unsigned int esr,
 		       struct pt_regs *regs)
 {
-	siginfo_t info;
-
 	if (user_mode(regs)) {
-		info = (siginfo_t) {
-			.si_signo = SIGTRAP,
-			.si_errno = 0,
-			.si_code  = TRAP_BRKPT,
-			.si_addr  = (void __user *)instruction_pointer(regs),
-		};
-
-		force_sig_info(SIGTRAP, &info, current);
+		send_user_sigtrap(TRAP_BRKPT);
 	} else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
 		pr_warning("Unexpected kernel BRK exception at EL1\n");
 		return -EFAULT;
@@ -328,7 +332,6 @@
 
 int aarch32_break_handler(struct pt_regs *regs)
 {
-	siginfo_t info;
 	u32 arm_instr;
 	u16 thumb_instr;
 	bool bp = false;
@@ -359,14 +362,7 @@
 	if (!bp)
 		return -EFAULT;
 
-	info = (siginfo_t) {
-		.si_signo = SIGTRAP,
-		.si_errno = 0,
-		.si_code  = TRAP_BRKPT,
-		.si_addr  = pc,
-	};
-
-	force_sig_info(SIGTRAP, &info, current);
+	send_user_sigtrap(TRAP_BRKPT);
 	return 0;
 }
 
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 999633b..352f7ab 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -89,6 +89,7 @@
 __efistub_memmove		= KALLSYMS_HIDE(__pi_memmove);
 __efistub_memset		= KALLSYMS_HIDE(__pi_memset);
 __efistub_strlen		= KALLSYMS_HIDE(__pi_strlen);
+__efistub_strnlen		= KALLSYMS_HIDE(__pi_strnlen);
 __efistub_strcmp		= KALLSYMS_HIDE(__pi_strcmp);
 __efistub_strncmp		= KALLSYMS_HIDE(__pi_strncmp);
 __efistub___flush_dcache_area	= KALLSYMS_HIDE(__pi___flush_dcache_area);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4fad978..d9751a4 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -44,14 +44,13 @@
 	unsigned long irq_stack_ptr;
 
 	/*
-	 * Use raw_smp_processor_id() to avoid false-positives from
-	 * CONFIG_DEBUG_PREEMPT. get_wchan() calls unwind_frame() on sleeping
-	 * task stacks, we can be pre-empted in this case, so
-	 * {raw_,}smp_processor_id() may give us the wrong value. Sleeping
-	 * tasks can't ever be on an interrupt stack, so regardless of cpu,
-	 * the checks will always fail.
+	 * Switching between stacks is valid when tracing current and in
+	 * non-preemptible context.
 	 */
-	irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id());
+	if (tsk == current && !preemptible())
+		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+	else
+		irq_stack_ptr = 0;
 
 	low  = frame->sp;
 	/* irq stacks are not THREAD_SIZE aligned */
@@ -64,8 +63,8 @@
 		return -EINVAL;
 
 	frame->sp = fp + 0x10;
-	frame->fp = *(unsigned long *)(fp);
-	frame->pc = *(unsigned long *)(fp + 8);
+	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk && tsk->ret_stack &&
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cbedd72..c539208 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -146,9 +146,18 @@
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+	unsigned long irq_stack_ptr;
 	int skip;
 
+	/*
+	 * Switching between stacks is valid when tracing current and in
+	 * non-preemptible context.
+	 */
+	if (tsk == current && !preemptible())
+		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+	else
+		irq_stack_ptr = 0;
+
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
 	if (!tsk)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3e568dc..d073b5a 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@
 	mrs	x4, tcr_el1
 	ldr	x5, =TCR_EL2_MASK
 	and	x4, x4, x5
-	ldr	x5, =TCR_EL2_FLAGS
+	mov	x5, #TCR_EL2_RES1
 	orr	x4, x4, x5
 
 #ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,14 +85,16 @@
 	ldr_l	x5, idmap_t0sz
 	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
 #endif
+	/*
+	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
+	 * TCR_EL2 and VTCR_EL2.
+	 */
+	mrs	x5, ID_AA64MMFR0_EL1
+	bfi	x4, x5, #16, #3
+
 	msr	tcr_el2, x4
 
 	ldr	x4, =VTCR_EL2_FLAGS
-	/*
-	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
-	 * VTCR_EL2.
-	 */
-	mrs	x5, ID_AA64MMFR0_EL1
 	bfi	x4, x5, #16, #3
 	/*
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca8f5a5..f0e7bdf 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -36,7 +36,11 @@
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
-	write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+
+	val = CPTR_EL2_DEFAULT;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+	write_sysreg(val, cptr_el2);
+
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
@@ -45,7 +49,7 @@
 	write_sysreg(HCR_RW, hcr_el2);
 	write_sysreg(0, hstr_el2);
 	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-	write_sysreg(0, cptr_el2);
+	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 648112e..4d1ac81 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -27,7 +27,11 @@
 
 #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
 				 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET	0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@
 		*fsr = 0x14;
 }
 
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+	u64 exc_offset;
+
+	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+	case PSR_MODE_EL1t:
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+		break;
+	case PSR_MODE_EL1h:
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+		break;
+	case PSR_MODE_EL0t:
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+		break;
+	default:
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+	}
+
+	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	/*
 	 * Build an unknown exception, depending on the instruction
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index eec3598..2e90371 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1007,10 +1007,9 @@
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			/* Handled */
+			return 0;
 		}
-
-		/* Handled */
-		return 0;
 	}
 
 	/* Not handled */
@@ -1043,7 +1042,7 @@
 }
 
 /**
- * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
@@ -1095,7 +1094,7 @@
 }
 
 /**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index 2ca6657..eae38da 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -168,4 +168,4 @@
 .Lhit_limit:
 	mov	len, limit
 	ret
-ENDPROC(strnlen)
+ENDPIPROC(strnlen)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 331c4ca..a6e757c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -933,6 +933,10 @@
 		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
 	if (!ret)
 		ret = register_iommu_dma_ops_notifier(&amba_bustype);
+
+	/* handle devices queued before this arch_initcall */
+	if (!ret)
+		__iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
 	return ret;
 }
 arch_initcall(__iommu_dma_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 92ddac1..abe2a95 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -371,6 +371,13 @@
 	return 0;
 }
 
+static int do_alignment_fault(unsigned long addr, unsigned int esr,
+			      struct pt_regs *regs)
+{
+	do_bad_area(addr, esr, regs);
+	return 0;
+}
+
 /*
  * This abort handler always returns "fault".
  */
@@ -418,7 +425,7 @@
 	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
-	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault"		},
+	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault"		},
 	{ do_bad,		SIGBUS,  0,		"unknown 34"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index cf62407..0795c3a 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -44,6 +45,7 @@
 	unsigned long end = start + size;
 	int ret;
 	struct page_change_data data;
+	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -51,10 +53,23 @@
 		WARN_ON_ONCE(1);
 	}
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
-
-	if (end < MODULES_VADDR || end >= MODULES_END)
+	/*
+	 * Kernel VA mappings are always live, and splitting live section
+	 * mappings into page mappings may cause TLB conflicts. This means
+	 * we have to ensure that changing the permission bits of the range
+	 * we are operating on does not result in such splitting.
+	 *
+	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+	 * Those are guaranteed to consist entirely of page mappings, and
+	 * splitting is never needed.
+	 *
+	 * So check whether the [addr, addr + size) interval is entirely
+	 * covered by precisely one VM area that has the VM_ALLOC flag set.
+	 */
+	area = find_vm_area((void *)addr);
+	if (!area ||
+	    end > (unsigned long)area->addr + area->size ||
+	    !(area->flags & VM_ALLOC))
 		return -EINVAL;
 
 	if (!numpages)
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 836ac5a..2841c0a 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -276,6 +276,7 @@
 
 config SMP
 	bool "Symmetric multi-processing support"
+	depends on MMU
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index fc96e81..d1fc479 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -108,6 +108,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -366,6 +374,7 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_HYDRA=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 05c904f..9bfe8be 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -106,6 +106,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -344,6 +352,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index d572b73..ebdcfae 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -106,6 +106,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -353,6 +361,7 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 11a30c6..8acc65e 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -104,6 +104,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6630a51..0c6a3d5 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -106,6 +106,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -345,6 +353,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 1d90b71..12a8a6c 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -105,6 +105,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -362,6 +370,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 1fd21c1..64ff2dc 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -115,6 +115,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -276,6 +278,12 @@
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -404,6 +412,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
 CONFIG_NE2000=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 74e10f7..07fc6ab 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -103,6 +103,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -261,6 +263,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 7034e71..69903de 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -104,6 +104,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index f7deb5f..bd84016 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -104,6 +104,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -352,6 +360,7 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 0ce79eb..5f9fb3a 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -101,6 +101,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -340,6 +348,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 4cb787e..5d1c674 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -101,6 +101,8 @@
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -341,6 +349,7 @@
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f9d96bf..bafaff6 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		376
+#define NR_syscalls		377
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 36cf129..0ca7296 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -381,5 +381,6 @@
 #define __NR_userfaultfd	373
 #define __NR_membarrier		374
 #define __NR_mlock2		375
+#define __NR_copy_file_range	376
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 282cd90..8bb9426 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -396,3 +396,4 @@
 	.long sys_userfaultfd
 	.long sys_membarrier
 	.long sys_mlock2		/* 375 */
+	.long sys_copy_file_range
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 57a945e..74a3db9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2085,7 +2085,7 @@
 
 config PAGE_SIZE_64KB
 	bool "64kB"
-	depends on !CPU_R3000 && !CPU_TX39XX
+	depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000
 	help
 	  Using 64kB page size will result in higher performance kernel at
 	  the price of higher memory consumption.  This option is available on
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
index 459b9b2..d61b161 100644
--- a/arch/mips/boot/dts/brcm/bcm6328.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -74,6 +74,7 @@
 		timer: timer@10000040 {
 			compatible = "syscon";
 			reg = <0x10000040 0x2c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
index 4fc7ece..1a7efa8 100644
--- a/arch/mips/boot/dts/brcm/bcm7125.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -98,6 +98,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x60c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
index a3039bb..d4bf52c 100644
--- a/arch/mips/boot/dts/brcm/bcm7346.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -118,6 +118,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
index 4274ff4..8e25016 100644
--- a/arch/mips/boot/dts/brcm/bcm7358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -112,6 +112,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
index 0dcc9163..7e5f760 100644
--- a/arch/mips/boot/dts/brcm/bcm7360.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -112,6 +112,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
index 2f3f9fc..c739ea7 100644
--- a/arch/mips/boot/dts/brcm/bcm7362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -118,6 +118,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
index bee221b..5f55d0a 100644
--- a/arch/mips/boot/dts/brcm/bcm7420.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -99,6 +99,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x60c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index 571f30f..e24d41a 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -100,6 +100,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index 614ee21..8b9432c 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -114,6 +114,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index cefb7a5..e090fc3 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -227,7 +227,7 @@
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
 		__res = 0;						\
@@ -258,7 +258,7 @@
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS64)			\
 		__res = 0;						\
@@ -285,6 +285,11 @@
 
 #endif /* !defined(ELF_ARCH) */
 
+#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
+
+#define vmcore_elf32_check_arch mips_elf_check_machine
+#define vmcore_elf64_check_arch mips_elf_check_machine
+
 struct mips_abi;
 
 extern struct mips_abi mips_abi;
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 9cbf383..f06f97bd 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -179,6 +179,10 @@
 		if (save)
 			_save_fp(tsk);
 		__disable_fpu();
+	} else {
+		/* FPU should not have been left enabled with no owner */
+		WARN(read_c0_status() & ST0_CU1,
+		     "Orphaned FPU left enabled");
 	}
 	KSTK_STATUS(tsk) &= ~ST0_CU1;
 	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 8ebd3f57..3ed10a8 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -128,7 +128,8 @@
 	case OCTEON_FEATURE_PCIE:
 		return OCTEON_IS_MODEL(OCTEON_CN56XX)
 			|| OCTEON_IS_MODEL(OCTEON_CN52XX)
-			|| OCTEON_IS_MODEL(OCTEON_CN6XXX);
+			|| OCTEON_IS_MODEL(OCTEON_CN6XXX)
+			|| OCTEON_IS_MODEL(OCTEON_CN7XXX);
 
 	case OCTEON_FEATURE_SRIO:
 		return OCTEON_IS_MODEL(OCTEON_CN63XX)
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 3f832c3..041153f 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -45,7 +45,7 @@
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
-#define TASK_SIZE	0x7fff8000UL
+#define TASK_SIZE	0x80000000UL
 #endif
 
 #define STACK_TOP_MAX	TASK_SIZE
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index a71da57..eebf395 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -289,7 +289,7 @@
 		.set	reorder
 		.set	noat
 		mfc0	a0, CP0_STATUS
-		li	v1, 0xff00
+		li	v1, ST0_CU1 | ST0_IM
 		ori	a0, STATMASK
 		xori	a0, STATMASK
 		mtc0	a0, CP0_STATUS
@@ -330,7 +330,7 @@
 		ori	a0, STATMASK
 		xori	a0, STATMASK
 		mtc0	a0, CP0_STATUS
-		li	v1, 0xff00
+		li	v1, ST0_CU1 | ST0_FR | ST0_IM
 		and	a0, v1
 		LONG_L	v0, PT_STATUS(sp)
 		nor	v1, $0, v1
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6499d93..47bc45a 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -101,10 +101,8 @@
 	/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
 	if ((config_enabled(CONFIG_32BIT) ||
 	    test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
-	    (regs->regs[2] == __NR_syscall)) {
+	    (regs->regs[2] == __NR_syscall))
 		i++;
-		n++;
-	}
 
 	while (n--)
 		ret |= mips_get_syscall_arg(args++, task, regs, i++);
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 90f03a7..3129795 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -380,16 +380,17 @@
 #define __NR_userfaultfd		(__NR_Linux + 357)
 #define __NR_membarrier			(__NR_Linux + 358)
 #define __NR_mlock2			(__NR_Linux + 359)
+#define __NR_copy_file_range		(__NR_Linux + 360)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		359
+#define __NR_Linux_syscalls		360
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		359
+#define __NR_O32_Linux_syscalls		360
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -717,16 +718,17 @@
 #define __NR_userfaultfd		(__NR_Linux + 317)
 #define __NR_membarrier			(__NR_Linux + 318)
 #define __NR_mlock2			(__NR_Linux + 319)
+#define __NR_copy_file_range		(__NR_Linux + 320)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		319
+#define __NR_Linux_syscalls		320
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		319
+#define __NR_64_Linux_syscalls		320
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1058,15 +1060,16 @@
 #define __NR_userfaultfd		(__NR_Linux + 321)
 #define __NR_membarrier			(__NR_Linux + 322)
 #define __NR_mlock2			(__NR_Linux + 323)
+#define __NR_copy_file_range		(__NR_Linux + 324)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		323
+#define __NR_Linux_syscalls		324
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		323
+#define __NR_N32_Linux_syscalls		324
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 1188e00..1b992c6 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -35,7 +35,7 @@
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
 		__res = 0;						\
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 9287678..abd3aff 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -47,7 +47,7 @@
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
 		__res = 0;						\
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index f2975d4..eddd5fd 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -65,12 +65,10 @@
 	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
 	status |= KU_USER;
 	regs->cp0_status = status;
-	clear_used_math();
-	clear_fpu_owner();
-	init_dsp();
-	clear_thread_flag(TIF_USEDMSA);
+	lose_fpu(0);
 	clear_thread_flag(TIF_MSA_CTX_LIVE);
-	disable_msa();
+	clear_used_math();
+	init_dsp();
 	regs->cp0_epc = pc;
 	regs->regs[29] = sp;
 }
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2d23c83..a563174 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -595,3 +595,4 @@
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range		/* 4360 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index deac633..2b2dc14 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -433,4 +433,5 @@
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range		/* 5320 */
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 5a69eb4..2bf5c85 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -423,4 +423,5 @@
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e4b6d7c..c5b759e 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -578,4 +578,5 @@
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range		/* 4360 */
 	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 569a7d5..5fdaf8b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -782,6 +782,7 @@
 void __init setup_arch(char **cmdline_p)
 {
 	cpu_probe();
+	mips_cm_probe();
 	prom_init();
 
 	setup_early_fdc_console();
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bafcb7a..ae790c5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -663,7 +663,7 @@
 	return -1;
 }
 
-static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 {
 	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 		int rd = (opcode & MM_RS) >> 16;
@@ -1119,11 +1119,12 @@
 	if (get_isa16_mode(regs->cp0_epc)) {
 		unsigned short mmop[2] = { 0 };
 
-		if (unlikely(get_user(mmop[0], epc) < 0))
+		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
 			status = SIGSEGV;
-		if (unlikely(get_user(mmop[1], epc) < 0))
+		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
 			status = SIGSEGV;
-		opcode = (mmop[0] << 16) | mmop[1];
+		opcode = mmop[0];
+		opcode = (opcode << 16) | mmop[1];
 
 		if (status < 0)
 			status = simulate_rdhwr_mm(regs, opcode);
@@ -1369,26 +1370,12 @@
 		if (unlikely(compute_return_epc(regs) < 0))
 			break;
 
-		if (get_isa16_mode(regs->cp0_epc)) {
-			unsigned short mmop[2] = { 0 };
-
-			if (unlikely(get_user(mmop[0], epc) < 0))
-				status = SIGSEGV;
-			if (unlikely(get_user(mmop[1], epc) < 0))
-				status = SIGSEGV;
-			opcode = (mmop[0] << 16) | mmop[1];
-
-			if (status < 0)
-				status = simulate_rdhwr_mm(regs, opcode);
-		} else {
+		if (!get_isa16_mode(regs->cp0_epc)) {
 			if (unlikely(get_user(opcode, epc) < 0))
 				status = SIGSEGV;
 
 			if (!cpu_has_llsc && status < 0)
 				status = simulate_llsc(regs, opcode);
-
-			if (status < 0)
-				status = simulate_rdhwr_normal(regs, opcode);
 		}
 
 		if (status < 0)
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 3bd0597..2496475 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -181,10 +181,6 @@
 	return 1;
 }
 
-void __weak platform_early_l2_init(void)
-{
-}
-
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -194,12 +190,6 @@
 	/* Mark as not present until probe completed */
 	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
 
-	/*
-	 * Do we need some platform specific probing before
-	 * we configure L2?
-	 */
-	platform_early_l2_init();
-
 	if (mips_cm_revision() >= CM_REV_CM3)
 		return mips_sc_probe_cm3();
 
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 571148c..dc2c521 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -293,7 +293,6 @@
 	console_config();
 #endif
 	/* Early detection of CMP support */
-	mips_cm_probe();
 	mips_cpc_probe();
 
 	if (!register_cps_smp_ops())
@@ -304,10 +303,3 @@
 		return;
 	register_up_smp_ops();
 }
-
-void platform_early_l2_init(void)
-{
-	/* L2 configuration lives in the CM3 */
-	if (mips_cm_revision() >= CM_REV_CM3)
-		mips_cm_probe();
-}
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index a009ee4..1ae932c2 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -297,12 +297,12 @@
 		return PTR_ERR(rstpcie0);
 
 	bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res);
-	if (!bridge_base)
-		return -ENOMEM;
+	if (IS_ERR(bridge_base))
+		return PTR_ERR(bridge_base);
 
 	pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res);
-	if (!pcie_base)
-		return -ENOMEM;
+	if (IS_ERR(pcie_base))
+		return PTR_ERR(pcie_base);
 
 	iomem_resource.start = 0;
 	iomem_resource.end = ~0;
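
The two hunks above fix a recurring pitfall: devm_ioremap_resource() signals failure with an ERR_PTR()-encoded pointer, never NULL, so a NULL check lets the error value through. A minimal probe fragment showing the expected pattern (the function and device names are illustrative, not taken from this driver):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	/* Illustrative only: check devm_ioremap_resource() with IS_ERR(),
	 * never against NULL, and propagate the encoded error code. */
	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}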
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e4824fd..9faa18c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -557,7 +557,7 @@
 
 config PPC_4K_PAGES
 	bool "4k page size"
-	select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+	select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
 
 config PPC_16K_PAGES
 	bool "16k page size"
@@ -566,7 +566,7 @@
 config PPC_64K_PAGES
 	bool "64k page size"
 	depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
-	select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+	select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
 
 config PPC_256K_PAGES
 	bool "256k page size"
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8d1c41d..ac07a30 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -281,6 +281,10 @@
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 			    pmd_t *pmdp);
 
+#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+				    unsigned long address, pmd_t *pmdp);
+
 #define pmd_move_must_withdraw pmd_move_must_withdraw
 struct spinlock;
 static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index c5eb86f..867c39b 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -81,6 +81,7 @@
 #define EEH_PE_KEEP		(1 << 8)	/* Keep PE on hotplug	*/
 #define EEH_PE_CFG_RESTRICTED	(1 << 9)	/* Block config on error */
 #define EEH_PE_REMOVED		(1 << 10)	/* Removed permanently	*/
+#define EEH_PE_PRI_BUS		(1 << 11)	/* Cached primary bus   */
 
 struct eeh_pe {
 	int type;			/* PE type: PHB/Bus/Device	*/
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 8e86b48..32e36b1 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -57,12 +57,14 @@
 extern void hcall_tracepoint_regfunc(void);
 extern void hcall_tracepoint_unregfunc(void);
 
-TRACE_EVENT_FN(hcall_entry,
+TRACE_EVENT_FN_COND(hcall_entry,
 
 	TP_PROTO(unsigned long opcode, unsigned long *args),
 
 	TP_ARGS(opcode, args),
 
+	TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
 	TP_STRUCT__entry(
 		__field(unsigned long, opcode)
 	),
@@ -76,13 +78,15 @@
 	hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
 );
 
-TRACE_EVENT_FN(hcall_exit,
+TRACE_EVENT_FN_COND(hcall_exit,
 
 	TP_PROTO(unsigned long opcode, unsigned long retval,
 		unsigned long *retbuf),
 
 	TP_ARGS(opcode, retval, retbuf),
 
+	TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
 	TP_STRUCT__entry(
 		__field(unsigned long, opcode)
 		__field(unsigned long, retval)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 9387421..301be31 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -564,6 +564,7 @@
 	 */
 	eeh_pe_state_mark(pe, EEH_PE_KEEP);
 	if (bus) {
+		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 		pci_lock_rescan_remove();
 		pcibios_remove_pci_devices(bus);
 		pci_unlock_rescan_remove();
@@ -803,6 +804,7 @@
 	 * their PCI config any more.
 	 */
 	if (frozen_bus) {
+		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
 
 		pci_lock_rescan_remove();
@@ -886,6 +888,7 @@
 					continue;
 
 				/* Notify all devices to be down */
+				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 				bus = eeh_pe_bus_get(phb_pe);
 				eeh_pe_dev_traverse(pe,
 					eeh_report_failure, NULL);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index ca9e537..98f8180 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -928,7 +928,7 @@
 		bus = pe->phb->bus;
 	} else if (pe->type & EEH_PE_BUS ||
 		   pe->type & EEH_PE_DEVICE) {
-		if (pe->bus) {
+		if (pe->state & EEH_PE_PRI_BUS) {
 			bus = pe->bus;
 			goto out;
 		}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ac64ffd..08b7a40 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -340,7 +340,7 @@
 			if (name[0] == '.') {
 				if (strcmp(name+1, "TOC.") == 0)
 					syms[i].st_shndx = SHN_ABS;
-				memmove(name, name+1, strlen(name));
+				syms[i].st_name++;
 			}
 		}
 	}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3124a20..cdf2123 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -646,6 +646,28 @@
 	return pgtable;
 }
 
+void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+			     unsigned long address, pmd_t *pmdp)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+
+	/*
+	 * We can't mark the pmd none here, because that will cause a race
+	 * against exit_mmap. We need to keep the pmd marked TRANS HUGE
+	 * while we split, but at the same time we want the rest of the
+	 * ppc64 code not to insert hash ptes for it, because we will be
+	 * modifying the deposited pgtable in the caller of this function.
+	 * Hence clear _PAGE_USER so that fault handling moves to a higher
+	 * level function, which will serialize against the ptl. We also
+	 * need to flush existing hash pte entries here, even though the
+	 * translation is still valid, because we will withdraw the
+	 * pgtable_t after this.
+	 */
+	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+}
+
+
 /*
  * set a new huge pmd. We should not be called for updating
  * an existing pmd entry. That should go via pmd_hugepage_update.
@@ -663,10 +685,20 @@
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
 
+/*
+ * We use this to invalidate a pmdp entry before switching from a
+ * hugepte to a regular pmd entry.
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
 	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+
+	/*
+	 * This ensures that generic code that relies on IRQ disabling
+	 * to prevent a parallel THP split works as expected.
+	 */
+	kick_all_cpus_sync();
 }
 
 /*
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5f152b9..87f47e5 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -444,9 +444,12 @@
 	 * PCI devices of the PE are expected to be removed prior
 	 * to PE reset.
 	 */
-	if (!edev->pe->bus)
+	if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
 		edev->pe->bus = pci_find_bus(hose->global_number,
 					     pdn->busno);
+		if (edev->pe->bus)
+			edev->pe->state |= EEH_PE_PRI_BUS;
+	}
 
 	/*
 	 * Enable EEH explicitly so that we will do EEH check
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 573ae19..f90dc04 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3180,6 +3180,7 @@
 
 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
        .dma_dev_setup = pnv_pci_dma_dev_setup,
+       .dma_bus_setup = pnv_pci_dma_bus_setup,
 #ifdef CONFIG_PCI_MSI
        .setup_msi_irqs = pnv_setup_msi_irqs,
        .teardown_msi_irqs = pnv_teardown_msi_irqs,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 2f55c86..b1ef84a 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -599,6 +599,9 @@
 	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
 	long i;
 
+	if (proto_tce & TCE_PCI_WRITE)
+		proto_tce |= TCE_PCI_READ;
+
 	for (i = 0; i < npages; i++) {
 		unsigned long newtce = proto_tce |
 			((rpn + i) << tbl->it_page_shift);
@@ -620,6 +623,9 @@
 
 	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
 
+	if (newtce & TCE_PCI_WRITE)
+		newtce |= TCE_PCI_READ;
+
 	oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
 	*hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
 	*direction = iommu_tce_direction(oldtce);
@@ -760,6 +766,26 @@
 		phb->dma_dev_setup(phb, pdev);
 }
 
+void pnv_pci_dma_bus_setup(struct pci_bus *bus)
+{
+	struct pci_controller *hose = bus->sysdata;
+	struct pnv_phb *phb = hose->private_data;
+	struct pnv_ioda_pe *pe;
+
+	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+			continue;
+
+		if (!pe->pbus)
+			continue;
+
+		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+			pe->pbus = bus;
+			break;
+		}
+	}
+}
+
 void pnv_pci_shutdown(void)
 {
 	struct pci_controller *hose;
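
Both TCE-building paths above apply the same rule: whenever the write permission bit is requested, the read bit is forced on as well, so no write-only entries are ever programmed into the table. A one-line helper expressing that rule (the helper itself is illustrative and not part of the patch):

	/* Illustrative helper capturing the rule applied in both hunks above. */
	static inline u64 example_tce_fixup_perms(u64 tce)
	{
		if (tce & TCE_PCI_WRITE)
			tce |= TCE_PCI_READ;
		return tce;
	}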
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7f56313..00691a9 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -242,6 +242,7 @@
 extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
 
 extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
+extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
 extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 7aa7991..a52b6cc 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -37,7 +37,7 @@
 	regs->psw.addr = ip;
 }
 #else
-#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#error Include linux/livepatch.h, not asm/livepatch.h
 #endif
 
 #endif
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index cfcba2d..0943b11 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -260,12 +260,13 @@
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			   struct pt_regs *regs)
 {
-	unsigned long head;
+	unsigned long head, frame_size;
 	struct stack_frame *head_sf;
 
 	if (user_mode(regs))
 		return;
 
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 	head = regs->gprs[15];
 	head_sf = (struct stack_frame *) head;
 
@@ -273,8 +274,9 @@
 		return;
 
 	head = head_sf->back_chain;
-	head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
-			     S390_lowcore.async_stack);
+	head = __store_trace(entry, head,
+			     S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+			     S390_lowcore.async_stack + frame_size);
 
 	__store_trace(entry, head, S390_lowcore.thread_info,
 		      S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 5acba3c..8f64ebd 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -59,26 +59,32 @@
 	}
 }
 
-void save_stack_trace(struct stack_trace *trace)
+static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
 {
-	register unsigned long sp asm ("15");
-	unsigned long orig_sp, new_sp;
+	unsigned long new_sp, frame_size;
 
-	orig_sp = sp;
-	new_sp = save_context_stack(trace, orig_sp,
-				    S390_lowcore.panic_stack - PAGE_SIZE,
-				    S390_lowcore.panic_stack, 1);
-	if (new_sp != orig_sp)
-		return;
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+	new_sp = save_context_stack(trace, sp,
+			S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
+			S390_lowcore.panic_stack + frame_size, 1);
 	new_sp = save_context_stack(trace, new_sp,
-				    S390_lowcore.async_stack - ASYNC_SIZE,
-				    S390_lowcore.async_stack, 1);
-	if (new_sp != orig_sp)
-		return;
+			S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+			S390_lowcore.async_stack + frame_size, 1);
 	save_context_stack(trace, new_sp,
 			   S390_lowcore.thread_info,
 			   S390_lowcore.thread_info + THREAD_SIZE, 1);
 }
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	register unsigned long r15 asm ("15");
+	unsigned long sp;
+
+	sp = r15;
+	__save_stack_trace(trace, sp);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
@@ -86,6 +92,10 @@
 	unsigned long sp, low, high;
 
 	sp = tsk->thread.ksp;
+	if (tsk == current) {
+		/* Get current stack pointer. */
+		asm volatile("la %0,0(15)" : "=a" (sp));
+	}
 	low = (unsigned long) task_stack_page(tsk);
 	high = (unsigned long) task_pt_regs(tsk);
 	save_context_stack(trace, sp, low, high, 0);
@@ -93,3 +103,14 @@
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	unsigned long sp;
+
+	sp = kernel_stack_pointer(regs);
+	__save_stack_trace(trace, sp);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
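
The new save_stack_trace_regs() export lets callers unwind from a saved register set rather than from the live stack pointer. A minimal usage sketch, with an arbitrarily sized entry array (illustrative, not taken from the patch):

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	/* Illustrative only: capture and print a trace from a pt_regs snapshot. */
	static void example_dump_from_regs(struct pt_regs *regs)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
			.nr_entries	= 0,
		};

		save_stack_trace_regs(regs, &trace);
		print_stack_trace(&trace, 0);
	}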
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 21a5df9..dde7654 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -18,6 +18,9 @@
 	unsigned long flags;
 	unsigned int *depth;
 
+	/* Avoid lockdep recursion. */
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		return;
 	local_irq_save(flags);
 	depth = this_cpu_ptr(&diagnose_trace_depth);
 	if (*depth == 0) {
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index fec59c0..792f9c6 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -93,15 +93,19 @@
  */
 int memcpy_real(void *dest, void *src, size_t count)
 {
+	int irqs_disabled, rc;
 	unsigned long flags;
-	int rc;
 
 	if (!count)
 		return 0;
-	local_irq_save(flags);
-	__arch_local_irq_stnsm(0xfbUL);
+	flags = __arch_local_irq_stnsm(0xf8UL);
+	irqs_disabled = arch_irqs_disabled_flags(flags);
+	if (!irqs_disabled)
+		trace_hardirqs_off();
 	rc = __memcpy_real(dest, src, count);
-	local_irq_restore(flags);
+	if (!irqs_disabled)
+		trace_hardirqs_on();
+	__arch_local_irq_ssm(flags);
 	return rc;
 }
 
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index fe0bfe3..1884e17 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -54,12 +54,13 @@
 
 void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-	unsigned long head;
+	unsigned long head, frame_size;
 	struct stack_frame* head_sf;
 
 	if (user_mode(regs))
 		return;
 
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 	head = regs->gprs[15];
 	head_sf = (struct stack_frame*)head;
 
@@ -68,8 +69,9 @@
 
 	head = head_sf->back_chain;
 
-	head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE,
-			    S390_lowcore.async_stack);
+	head = __show_trace(&depth, head,
+			    S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+			    S390_lowcore.async_stack + frame_size);
 
 	__show_trace(&depth, head, S390_lowcore.thread_info,
 		     S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
index e13d41c..f878bec 100644
--- a/arch/um/include/asm/page.h
+++ b/arch/um/include/asm/page.h
@@ -34,21 +34,18 @@
 
 #if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
 
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
+#define pte_val(p) ((p).pte)
 
-#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
-#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
-#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
-#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
-			      smp_wmb(); \
-			      (to).pte_low = (from).pte_low; })
-#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
-#define pte_set_val(pte, phys, prot) \
-	({ (pte).pte_high = (phys) >> 32; \
-	   (pte).pte_low = (phys) | pgprot_val(prot); })
+#define pte_get_bits(p, bits) ((p).pte & (bits))
+#define pte_set_bits(p, bits) ((p).pte |= (bits))
+#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
+#define pte_copy(to, from) ({ (to).pte = (from).pte; })
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_set_val(p, phys, prot) \
+	({ (p).pte = (phys) | pgprot_val(prot); })
 
 #define pmd_val(x)	((x).pmd)
 #define __pmd(x) ((pmd_t) { (x) } )
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9af2e63..c46662f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -475,6 +475,7 @@
 	depends on X86_64
 	depends on X86_EXTENDED_PLATFORM
 	depends on NUMA
+	depends on EFI
 	depends on X86_X2APIC
 	depends on PCI
 	---help---
@@ -777,8 +778,8 @@
 	  HPET is the next generation timer replacing legacy 8254s.
 	  The HPET provides a stable time base on SMP
 	  systems, unlike the TSC, but it is more expensive to access,
-	  as it is off-chip.  You can find the HPET spec at
-	  <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
+	  as it is off-chip.  The interface used is documented
+	  in the HPET spec, revision 1.
 
 	  You can safely choose Y here.  However, HPET will only be
 	  activated if the platform and the BIOS support this feature.
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 19c099a..e795f52 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -41,7 +41,7 @@
 	regs->ip = ip;
 }
 #else
-#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#error Include linux/livepatch.h, not asm/livepatch.h
 #endif
 
 #endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2d5a50c..20c11d1 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -766,7 +766,7 @@
  * Return saved PC of a blocked thread.
  * What is this good for? It will always be the scheduler or ret_from_fork.
  */
-#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
+#define thread_saved_pc(t)	READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 4974274..8836fc9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -323,6 +323,8 @@
 	return 0;
 
 fail:
+	if (amd_uncore_nb)
+		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
 	kfree(uncore_nb);
 	return -ENOMEM;
 }
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 982ce34..27f89c7 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -232,17 +232,31 @@
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
- * This will force destination/source out of cache for more performance.
+ * This will force destination out of cache for more performance.
+ *
+ * Note: Cached memory copy is used when the destination or size is not
+ * naturally aligned. That is, the nocache path:
+ *  - requires 8-byte alignment when the size is 8 bytes or larger,
+ *  - requires 4-byte alignment when the size is 4 bytes.
  */
 ENTRY(__copy_user_nocache)
 	ASM_STAC
+
+	/* If size is less than 8 bytes, go to 4-byte copy */
 	cmpl $8,%edx
-	jb 20f		/* less then 8 bytes, go to byte copy loop */
+	jb .L_4b_nocache_copy_entry
+
+	/* If destination is not 8-byte aligned, "cache" copy to align it */
 	ALIGN_DESTINATION
+
+	/* Set 4x8-byte copy count and remainder */
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz 17f
+	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */
+
+	/* Perform 4x8-byte nocache loop-copy */
+.L_4x8b_nocache_copy_loop:
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
@@ -262,60 +276,106 @@
 	leaq 64(%rsi),%rsi
 	leaq 64(%rdi),%rdi
 	decl %ecx
-	jnz 1b
-17:	movl %edx,%ecx
+	jnz .L_4x8b_nocache_copy_loop
+
+	/* Set 8-byte copy count and remainder */
+.L_8b_nocache_copy_entry:
+	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
-	jz 20f
-18:	movq (%rsi),%r8
-19:	movnti %r8,(%rdi)
+	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */
+
+	/* Perform 8-byte nocache loop-copy */
+.L_8b_nocache_copy_loop:
+20:	movq (%rsi),%r8
+21:	movnti %r8,(%rdi)
 	leaq 8(%rsi),%rsi
 	leaq 8(%rdi),%rdi
 	decl %ecx
-	jnz 18b
-20:	andl %edx,%edx
-	jz 23f
+	jnz .L_8b_nocache_copy_loop
+
+	/* If no bytes are left, we're done */
+.L_4b_nocache_copy_entry:
+	andl %edx,%edx
+	jz .L_finish_copy
+
+	/* If destination is not 4-byte aligned, go to byte copy: */
+	movl %edi,%ecx
+	andl $3,%ecx
+	jnz .L_1b_cache_copy_entry
+
+	/* Set 4-byte copy count (1 or 0) and remainder */
 	movl %edx,%ecx
-21:	movb (%rsi),%al
-22:	movb %al,(%rdi)
+	andl $3,%edx
+	shrl $2,%ecx
+	jz .L_1b_cache_copy_entry	/* jump if count is 0 */
+
+	/* Perform 4-byte nocache copy: */
+30:	movl (%rsi),%r8d
+31:	movnti %r8d,(%rdi)
+	leaq 4(%rsi),%rsi
+	leaq 4(%rdi),%rdi
+
+	/* If no bytes left, we're done: */
+	andl %edx,%edx
+	jz .L_finish_copy
+
+	/* Perform byte "cache" loop-copy for the remainder */
+.L_1b_cache_copy_entry:
+	movl %edx,%ecx
+.L_1b_cache_copy_loop:
+40:	movb (%rsi),%al
+41:	movb %al,(%rdi)
 	incq %rsi
 	incq %rdi
 	decl %ecx
-	jnz 21b
-23:	xorl %eax,%eax
+	jnz .L_1b_cache_copy_loop
+
+	/* Finished copying; fence the prior stores */
+.L_finish_copy:
+	xorl %eax,%eax
 	ASM_CLAC
 	sfence
 	ret
 
 	.section .fixup,"ax"
-30:	shll $6,%ecx
+.L_fixup_4x8b_copy:
+	shll $6,%ecx
 	addl %ecx,%edx
-	jmp 60f
-40:	lea (%rdx,%rcx,8),%rdx
-	jmp 60f
-50:	movl %ecx,%edx
-60:	sfence
+	jmp .L_fixup_handle_tail
+.L_fixup_8b_copy:
+	lea (%rdx,%rcx,8),%rdx
+	jmp .L_fixup_handle_tail
+.L_fixup_4b_copy:
+	lea (%rdx,%rcx,4),%rdx
+	jmp .L_fixup_handle_tail
+.L_fixup_1b_copy:
+	movl %ecx,%edx
+.L_fixup_handle_tail:
+	sfence
 	jmp copy_user_handle_tail
 	.previous
 
-	_ASM_EXTABLE(1b,30b)
-	_ASM_EXTABLE(2b,30b)
-	_ASM_EXTABLE(3b,30b)
-	_ASM_EXTABLE(4b,30b)
-	_ASM_EXTABLE(5b,30b)
-	_ASM_EXTABLE(6b,30b)
-	_ASM_EXTABLE(7b,30b)
-	_ASM_EXTABLE(8b,30b)
-	_ASM_EXTABLE(9b,30b)
-	_ASM_EXTABLE(10b,30b)
-	_ASM_EXTABLE(11b,30b)
-	_ASM_EXTABLE(12b,30b)
-	_ASM_EXTABLE(13b,30b)
-	_ASM_EXTABLE(14b,30b)
-	_ASM_EXTABLE(15b,30b)
-	_ASM_EXTABLE(16b,30b)
-	_ASM_EXTABLE(18b,40b)
-	_ASM_EXTABLE(19b,40b)
-	_ASM_EXTABLE(21b,50b)
-	_ASM_EXTABLE(22b,50b)
+	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
+	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
+	_ASM_EXTABLE(30b,.L_fixup_4b_copy)
+	_ASM_EXTABLE(31b,.L_fixup_4b_copy)
+	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
+	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
 ENDPROC(__copy_user_nocache)
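
The note at the top of __copy_user_nocache() spells out when the non-temporal path may be taken at all. A hypothetical C helper that encodes the same alignment rule (the name and placement are illustrative, not part of the kernel):

	#include <linux/types.h>

	/* Illustrative only: movnti stores are used when the destination and
	 * size are naturally aligned; anything else stays on the cached path. */
	static inline bool example_nocache_store_ok(const void *dst, size_t size)
	{
		if (size >= 8)
			return ((unsigned long)dst & 7) == 0;
		if (size == 4)
			return ((unsigned long)dst & 3) == 0;
		return false;
	}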
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eef44d9..e830c71 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -287,6 +287,9 @@
 	if (!pmd_k)
 		return -1;
 
+	if (pmd_huge(*pmd_k))
+		return 0;
+
 	pte_k = pte_offset_kernel(pmd_k, address);
 	if (!pte_present(*pte_k))
 		return -1;
@@ -360,8 +363,6 @@
  * 64-bit:
  *
  *   Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
  */
 static noinline int vmalloc_fault(unsigned long address)
 {
@@ -403,17 +404,23 @@
 	if (pud_none(*pud_ref))
 		return -1;
 
-	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 		BUG();
 
+	if (pud_huge(*pud))
+		return 0;
+
 	pmd = pmd_offset(pud, address);
 	pmd_ref = pmd_offset(pud_ref, address);
 	if (pmd_none(*pmd_ref))
 		return -1;
 
-	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 		BUG();
 
+	if (pmd_huge(*pmd))
+		return 0;
+
 	pte_ref = pte_offset_kernel(pmd_ref, address);
 	if (!pte_present(*pte_ref))
 		return -1;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6d5eb59..d8a798d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -102,7 +102,6 @@
 			return 0;
 		}
 
-		page = pte_page(pte);
 		if (pte_devmap(pte)) {
 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
 			if (unlikely(!pgmap)) {
@@ -115,6 +114,7 @@
 			return 0;
 		}
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+		page = pte_page(pte);
 		get_page(page);
 		put_dev_pagemap(pgmap);
 		SetPageReferenced(page);
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 42982b2..740d7ac 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -173,10 +173,10 @@
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 static __init int gigantic_pages_init(void)
 {
-	/* With CMA we can allocate gigantic pages at runtime */
+	/* With compaction or CMA we can allocate gigantic pages at runtime */
 	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index c3b3f65..d04f809 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -469,7 +469,7 @@
 {
 	int i, nid;
 	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
-	unsigned long start, end;
+	phys_addr_t start, end;
 	struct memblock_region *r;
 
 	/*
diff --git a/block/bio.c b/block/bio.c
index dbabd48..cf75915 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -874,7 +874,7 @@
 	bio->bi_private = &ret;
 	bio->bi_end_io = submit_bio_wait_endio;
 	submit_bio(rw, bio);
-	wait_for_completion(&ret.event);
+	wait_for_completion_io(&ret.event);
 
 	return ret.error;
 }
@@ -1090,9 +1090,12 @@
 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
 		/*
 		 * if we're in a workqueue, the request is orphaned, so
-		 * don't copy into a random user address space, just free.
+		 * don't copy into a random user address space, just free
+		 * and return -EINTR so user space doesn't expect any data.
 		 */
-		if (current->mm && bio_data_dir(bio) == READ)
+		if (!current->mm)
+			ret = -EINTR;
+		else if (bio_data_dir(bio) == READ)
 			ret = bio_copy_to_iter(bio, bmd->iter);
 		if (bmd->is_our_pages)
 			bio_free_pages(bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5a37188..66e6f1aa 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -788,6 +788,7 @@
 {
 	struct gendisk *disk;
 	struct blkcg_gq *blkg;
+	struct module *owner;
 	unsigned int major, minor;
 	int key_len, part, ret;
 	char *body;
@@ -804,7 +805,9 @@
 	if (!disk)
 		return -ENODEV;
 	if (part) {
+		owner = disk->fops->owner;
 		put_disk(disk);
+		module_put(owner);
 		return -ENODEV;
 	}
 
@@ -820,7 +823,9 @@
 		ret = PTR_ERR(blkg);
 		rcu_read_unlock();
 		spin_unlock_irq(disk->queue->queue_lock);
+		owner = disk->fops->owner;
 		put_disk(disk);
+		module_put(owner);
 		/*
 		 * If queue was bypassing, we should retry.  Do so after a
 		 * short msleep().  It isn't strictly necessary but queue
@@ -851,9 +856,13 @@
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
 {
+	struct module *owner;
+
 	spin_unlock_irq(ctx->disk->queue->queue_lock);
 	rcu_read_unlock();
+	owner = ctx->disk->fops->owner;
 	put_disk(ctx->disk);
+	module_put(owner);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
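
The added module_get/put handling matters because the disk lookup in blkg_conf_prep() also takes a reference on the disk's driver module, and disk->fops can no longer be trusted once the gendisk reference is dropped. A condensed sketch of the release order used above (the wrapper is illustrative, not a new API):

	#include <linux/genhd.h>
	#include <linux/module.h>

	/* Illustrative only: cache the owning module before put_disk(), then
	 * drop the module reference taken when the disk was looked up. */
	static void example_put_disk_and_module(struct gendisk *disk)
	{
		struct module *owner = disk->fops->owner;

		put_disk(disk);
		module_put(owner);
	}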
 
diff --git a/block/blk-core.c b/block/blk-core.c
index ab51685..b83d297 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2455,14 +2455,16 @@
 
 			rq = NULL;
 			break;
-		} else if (ret == BLKPREP_KILL) {
+		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
+			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
+
 			rq->cmd_flags |= REQ_QUIET;
 			/*
 			 * Mark this request as started so we don't trigger
 			 * any debug logic in the end I/O path.
 			 */
 			blk_start_request(rq);
-			__blk_end_request_all(rq, -EIO);
+			__blk_end_request_all(rq, err);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c0622f..56c0a72 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -599,8 +599,10 @@
 		 * If a request wasn't started before the queue was
 		 * marked dying, kill it here or it'll go unnoticed.
 		 */
-		if (unlikely(blk_queue_dying(rq->q)))
-			blk_mq_complete_request(rq, -EIO);
+		if (unlikely(blk_queue_dying(rq->q))) {
+			rq->errors = -EIO;
+			blk_mq_end_request(rq, rq->errors);
+		}
 		return;
 	}
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd49735..c7bb666 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,8 +91,8 @@
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
-		BLK_SAFE_MAX_SECTORS;
+	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->max_dev_sectors = 0;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e140cc4..dd937630 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -147,10 +147,9 @@
 
 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
 {
-	unsigned long long val;
 
-	val = q->limits.max_hw_discard_sectors << 9;
-	return sprintf(page, "%llu\n", val);
+	return sprintf(page, "%llu\n",
+		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
 }
 
 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index a753df2..d0dd788 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -39,7 +39,6 @@
 	 */
 	struct request *next_rq[2];
 	unsigned int batching;		/* number of sequential requests made */
-	sector_t last_sector;		/* head position */
 	unsigned int starved;		/* times reads have starved writes */
 
 	/*
@@ -210,8 +209,6 @@
 	dd->next_rq[WRITE] = NULL;
 	dd->next_rq[data_dir] = deadline_latter_request(rq);
 
-	dd->last_sector = rq_end_sector(rq);
-
 	/*
 	 * take it off the sort and fifo list, move
 	 * to dispatch queue
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 38c1aa8..28556fc 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -65,18 +65,10 @@
 	struct skcipher_async_rsgl first_sgl;
 	struct list_head list;
 	struct scatterlist *tsg;
-	char iv[];
+	atomic_t *inflight;
+	struct skcipher_request req;
 };
 
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
-	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
-	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
-	crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
 		      sizeof(struct scatterlist) - 1)
 
@@ -102,15 +94,12 @@
 
 static void skcipher_async_cb(struct crypto_async_request *req, int err)
 {
-	struct sock *sk = req->data;
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+	struct skcipher_async_req *sreq = req->data;
 	struct kiocb *iocb = sreq->iocb;
 
-	atomic_dec(&ctx->inflight);
+	atomic_dec(sreq->inflight);
 	skcipher_free_async_sgls(sreq);
-	kfree(req);
+	kzfree(sreq);
 	iocb->ki_complete(iocb, err, err);
 }
 
@@ -306,8 +295,11 @@
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
 	unsigned ivsize = crypto_skcipher_ivsize(tfm);
 	struct skcipher_sg_list *sgl;
 	struct af_alg_control con = {};
@@ -509,37 +501,43 @@
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
 	struct skcipher_sg_list *sgl;
 	struct scatterlist *sg;
 	struct skcipher_async_req *sreq;
 	struct skcipher_request *req;
 	struct skcipher_async_rsgl *last_rsgl = NULL;
-	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
-	unsigned int reqlen = sizeof(struct skcipher_async_req) +
-				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+	unsigned int txbufs = 0, len = 0, tx_nents;
+	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
 	int err = -ENOMEM;
 	bool mark = false;
+	char *iv;
+
+	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+	if (unlikely(!sreq))
+		goto out;
+
+	req = &sreq->req;
+	iv = (char *)(req + 1) + reqsize;
+	sreq->iocb = msg->msg_iocb;
+	INIT_LIST_HEAD(&sreq->list);
+	sreq->inflight = &ctx->inflight;
 
 	lock_sock(sk);
-	req = kmalloc(reqlen, GFP_KERNEL);
-	if (unlikely(!req))
-		goto unlock;
-
-	sreq = GET_SREQ(req, ctx);
-	sreq->iocb = msg->msg_iocb;
-	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
-	INIT_LIST_HEAD(&sreq->list);
+	tx_nents = skcipher_all_sg_nents(ctx);
 	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
-	if (unlikely(!sreq->tsg)) {
-		kfree(req);
+	if (unlikely(!sreq->tsg))
 		goto unlock;
-	}
 	sg_init_table(sreq->tsg, tx_nents);
-	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
-	skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      skcipher_async_cb, sk);
+	memcpy(iv, ctx->iv, ivsize);
+	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      skcipher_async_cb, sreq);
 
 	while (iov_iter_count(&msg->msg_iter)) {
 		struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@
 		sg_mark_end(sreq->tsg + txbufs - 1);
 
 	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-				   len, sreq->iv);
+				   len, iv);
 	err = ctx->enc ? crypto_skcipher_encrypt(req) :
 			 crypto_skcipher_decrypt(req);
 	if (err == -EINPROGRESS) {
 		atomic_inc(&ctx->inflight);
 		err = -EIOCBQUEUED;
+		sreq = NULL;
 		goto unlock;
 	}
 free:
 	skcipher_free_async_sgls(sreq);
-	kfree(req);
 unlock:
 	skcipher_wmem_wakeup(sk);
 	release_sock(sk);
+	kzfree(sreq);
+out:
 	return err;
 }
 
@@ -637,9 +637,12 @@
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct skcipher_ctx *ctx = ask->private;
-	unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
-		&ctx->req));
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
+	unsigned bs = crypto_skcipher_blocksize(tfm);
 	struct skcipher_sg_list *sgl;
 	struct scatterlist *sg;
 	int err = -EAGAIN;
@@ -947,7 +950,8 @@
 	ask->private = ctx;
 
 	skcipher_request_set_tfm(&ctx->req, skcipher);
-	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+						 CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      af_alg_complete, &ctx->completion);
 
 	sk->sk_destruct = skcipher_sock_destruct;
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 237f379..43fe85f2 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -499,6 +499,7 @@
 		if (link->dump == NULL)
 			return -EINVAL;
 
+		down_read(&crypto_alg_sem);
 		list_for_each_entry(alg, &crypto_alg_list, cra_list)
 			dump_alloc += CRYPTO_REPORT_MAXSIZE;
 
@@ -508,8 +509,11 @@
 				.done = link->done,
 				.min_dump_alloc = dump_alloc,
 			};
-			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
 		}
+		up_read(&crypto_alg_sem);
+
+		return err;
 	}
 
 	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index c570b1d..0872d5f 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -880,7 +880,7 @@
 		break;
 	case BUS_NOTIFY_DRIVER_NOT_BOUND:
 	case BUS_NOTIFY_UNBOUND_DRIVER:
-		pdev->dev.pm_domain = NULL;
+		dev_pm_domain_set(&pdev->dev, NULL);
 		break;
 	case BUS_NOTIFY_ADD_DEVICE:
 		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 594fcab..546a369 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -264,6 +264,26 @@
 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a4faa43..a44c75d 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -250,6 +250,7 @@
 	AHCI_HFLAG_MULTI_MSI		= 0,
 	AHCI_HFLAG_MULTI_MSIX		= 0,
 #endif
+	AHCI_HFLAG_WAKE_BEFORE_STOP	= (1 << 22), /* wake before DMA stop */
 
 	/* ap->flags bits */
 
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index b36cae2..e87bcec 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -317,6 +317,7 @@
 	if (IS_ERR(hpriv))
 		return PTR_ERR(hpriv);
 	hpriv->plat_data = priv;
+	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
 
 	brcm_sata_alpm_init(hpriv);
 
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d61740e..4029679 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -496,8 +496,8 @@
 		}
 	}
 
-	/* fabricate port_map from cap.nr_ports */
-	if (!port_map) {
+	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+	if (!port_map && vers < 0x10300) {
 		port_map = (1 << ahci_nr_ports(cap)) - 1;
 		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
 
@@ -593,8 +593,22 @@
 int ahci_stop_engine(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
 	u32 tmp;
 
+	/*
+	 * On some controllers, stopping a port's DMA engine while the port
+	 * is in ALPM state (partial or slumber) results in failures on
+	 * subsequent DMA engine starts.  For those controllers, put the
+	 * port back in active state before stopping its DMA engine.
+	 */
+	if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
+	    (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
+	    ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
+		dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
+		return -EIO;
+	}
+
 	tmp = readl(port_mmio + PORT_CMD);
 
 	/* check if the HBA is idle */
@@ -689,6 +703,9 @@
 	void __iomem *port_mmio = ahci_port_base(ap);
 
 	if (policy != ATA_LPM_MAX_POWER) {
+		/* wakeup flag only applies to the max power policy */
+		hints &= ~ATA_LPM_WAKE_ONLY;
+
 		/*
 		 * Disable interrupts on Phy Ready. This keeps us from
 		 * getting woken up due to spurious phy ready
@@ -704,7 +721,8 @@
 		u32 cmd = readl(port_mmio + PORT_CMD);
 
 		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
-			cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+			if (!(hints & ATA_LPM_WAKE_ONLY))
+				cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
 			cmd |= PORT_CMD_ICC_ACTIVE;
 
 			writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +730,9 @@
 
 			/* wait 10ms to be sure we've come out of LPM state */
 			ata_msleep(ap, 10);
+
+			if (hints & ATA_LPM_WAKE_ONLY)
+				return 0;
 		} else {
 			cmd |= PORT_CMD_ALPE;
 			if (policy == ATA_LPM_MIN_POWER)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cbb7471..55e257c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,7 @@
 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
+	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
 	/* Odd clown on sil3726/4726 PMPs */
 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index cdf6215..051b615 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -997,12 +997,9 @@
 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 {
 	struct ata_port *ap = qc->ap;
-	unsigned long flags;
 
 	if (ap->ops->error_handler) {
 		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
-
 			/* EH might have kicked in while host lock is
 			 * released.
 			 */
@@ -1014,8 +1011,6 @@
 				} else
 					ata_port_freeze(ap);
 			}
-
-			spin_unlock_irqrestore(ap->lock, flags);
 		} else {
 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
 				ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@
 		}
 	} else {
 		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
 			ata_sff_irq_on(ap);
 			ata_qc_complete(qc);
-			spin_unlock_irqrestore(ap->lock, flags);
 		} else
 			ata_qc_complete(qc);
 	}
@@ -1048,9 +1041,10 @@
 {
 	struct ata_link *link = qc->dev->link;
 	struct ata_eh_info *ehi = &link->eh_info;
-	unsigned long flags = 0;
 	int poll_next;
 
+	lockdep_assert_held(ap->lock);
+
 	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
 	/* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@
 			}
 		}
 
-		/* Send the CDB (atapi) or the first data block (ata pio out).
-		 * During the state transition, interrupt handler shouldn't
-		 * be invoked before the data transfer is complete and
-		 * hsm_task_state is changed. Hence, the following locking.
-		 */
-		if (in_wq)
-			spin_lock_irqsave(ap->lock, flags);
-
 		if (qc->tf.protocol == ATA_PROT_PIO) {
 			/* PIO data out protocol.
 			 * send first data block.
@@ -1135,9 +1121,6 @@
 			/* send CDB */
 			atapi_send_cdb(ap, qc);
 
-		if (in_wq)
-			spin_unlock_irqrestore(ap->lock, flags);
-
 		/* if polling, ata_sff_pio_task() handles the rest.
 		 * otherwise, interrupt handler takes over from here.
 		 */
@@ -1296,7 +1279,8 @@
 		break;
 	default:
 		poll_next = 0;
-		BUG();
+		WARN(true, "ata%d: SFF host state machine in invalid state %d",
+		     ap->print_id, ap->hsm_task_state);
 	}
 
 	return poll_next;
@@ -1361,12 +1345,14 @@
 	u8 status;
 	int poll_next;
 
+	spin_lock_irq(ap->lock);
+
 	BUG_ON(ap->sff_pio_task_link == NULL);
 	/* qc can be NULL if timeout occurred */
 	qc = ata_qc_from_tag(ap, link->active_tag);
 	if (!qc) {
 		ap->sff_pio_task_link = NULL;
-		return;
+		goto out_unlock;
 	}
 
 fsm_start:
@@ -1381,11 +1367,14 @@
 	 */
 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
 	if (status & ATA_BUSY) {
+		spin_unlock_irq(ap->lock);
 		ata_msleep(ap, 2);
+		spin_lock_irq(ap->lock);
+
 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
 		if (status & ATA_BUSY) {
 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
-			return;
+			goto out_unlock;
 		}
 	}
 
@@ -1402,6 +1391,8 @@
 	 */
 	if (poll_next)
 		goto fsm_start;
+out_unlock:
+	spin_unlock_irq(ap->lock);
 }
 
 /**
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 89f5cf68..04a1582 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -206,6 +206,8 @@
 		if (mc->release)
 			mc->release(master, mc->data);
 	}
+
+	kfree(match->compare);
 }
 
 static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@
 	if (match->alloc == num)
 		return 0;
 
-	new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL);
+	new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
 	if (match->compare) {
 		memcpy(new, match->compare, sizeof(*new) *
 					    min(match->num, num));
-		devm_kfree(dev, match->compare);
+		kfree(match->compare);
 	}
 	match->compare = new;
 	match->alloc = num;
@@ -283,6 +285,24 @@
 }
 EXPORT_SYMBOL(component_match_add_release);
 
+static void free_master(struct master *master)
+{
+	struct component_match *match = master->match;
+	int i;
+
+	list_del(&master->node);
+
+	if (match) {
+		for (i = 0; i < match->num; i++) {
+			struct component *c = match->compare[i].component;
+			if (c)
+				c->master = NULL;
+		}
+	}
+
+	kfree(master);
+}
+
 int component_master_add_with_match(struct device *dev,
 	const struct component_master_ops *ops,
 	struct component_match *match)
@@ -309,11 +329,9 @@
 
 	ret = try_to_bring_up_master(master, NULL);
 
-	if (ret < 0) {
-		/* Delete off the list if we weren't successful */
-		list_del(&master->node);
-		kfree(master);
-	}
+	if (ret < 0)
+		free_master(master);
+
 	mutex_unlock(&component_mutex);
 
 	return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@
 	const struct component_master_ops *ops)
 {
 	struct master *master;
-	int i;
 
 	mutex_lock(&component_mutex);
 	master = __master_find(dev, ops);
 	if (master) {
-		struct component_match *match = master->match;
-
 		take_down_master(master);
-
-		list_del(&master->node);
-
-		if (match) {
-			for (i = 0; i < match->num; i++) {
-				struct component *c = match->compare[i].component;
-				if (c)
-					c->master = NULL;
-			}
-		}
-		kfree(master);
+		free_master(master);
 	}
 	mutex_unlock(&component_mutex);
 }
@@ -486,6 +491,8 @@
 
 	ret = try_to_bring_up_masters(component);
 	if (ret < 0) {
+		if (component->master)
+			remove_component(component->master, component);
 		list_del(&component->node);
 
 		kfree(component);
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 93ed14c..f6a9ad5 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -146,7 +146,7 @@
 	if (dev->pm_domain == pd)
 		return;
 
-	WARN(device_is_bound(dev),
+	WARN(pd && device_is_bound(dev),
 	     "PM domains can only be changed for unbound devices\n");
 	dev->pm_domain = pd;
 	device_pm_check_callbacks(dev);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 784dbe8..301b785 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -173,14 +173,14 @@
 }
 
 /**
- * __genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
+static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 {
 	struct gpd_link *link;
 	int ret = 0;
@@ -200,7 +200,7 @@
 		genpd_sd_counter_inc(master);
 
 		mutex_lock_nested(&master->lock, depth + 1);
-		ret = __genpd_poweron(master, depth + 1);
+		ret = genpd_poweron(master, depth + 1);
 		mutex_unlock(&master->lock);
 
 		if (ret) {
@@ -227,21 +227,6 @@
 	return ret;
 }
 
-/**
- * genpd_poweron - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- */
-static int genpd_poweron(struct generic_pm_domain *genpd)
-{
-	int ret;
-
-	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd, 0);
-	mutex_unlock(&genpd->lock);
-	return ret;
-}
-
-
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -489,7 +474,7 @@
 	}
 
 	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd, 0);
+	ret = genpd_poweron(genpd, 0);
 	mutex_unlock(&genpd->lock);
 
 	if (ret)
@@ -1821,8 +1806,10 @@
 
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
-	ret = genpd_poweron(pd);
 
+	mutex_lock(&pd->lock);
+	ret = genpd_poweron(pd, 0);
+	mutex_unlock(&pd->lock);
 out:
 	return ret ? -EPROBE_DEFER : 0;
 }
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8812bfb..eea5156 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -133,17 +133,17 @@
 	while (val_size) {
 		switch (ctx->val_bytes) {
 		case 1:
-			__raw_writeb(*(u8 *)val, ctx->regs + offset);
+			writeb(*(u8 *)val, ctx->regs + offset);
 			break;
 		case 2:
-			__raw_writew(*(u16 *)val, ctx->regs + offset);
+			writew(*(u16 *)val, ctx->regs + offset);
 			break;
 		case 4:
-			__raw_writel(*(u32 *)val, ctx->regs + offset);
+			writel(*(u32 *)val, ctx->regs + offset);
 			break;
 #ifdef CONFIG_64BIT
 		case 8:
-			__raw_writeq(*(u64 *)val, ctx->regs + offset);
+			writeq(*(u64 *)val, ctx->regs + offset);
 			break;
 #endif
 		default:
@@ -193,17 +193,17 @@
 	while (val_size) {
 		switch (ctx->val_bytes) {
 		case 1:
-			*(u8 *)val = __raw_readb(ctx->regs + offset);
+			*(u8 *)val = readb(ctx->regs + offset);
 			break;
 		case 2:
-			*(u16 *)val = __raw_readw(ctx->regs + offset);
+			*(u16 *)val = readw(ctx->regs + offset);
 			break;
 		case 4:
-			*(u32 *)val = __raw_readl(ctx->regs + offset);
+			*(u32 *)val = readl(ctx->regs + offset);
 			break;
 #ifdef CONFIG_64BIT
 		case 8:
-			*(u64 *)val = __raw_readq(ctx->regs + offset);
+			*(u64 *)val = readq(ctx->regs + offset);
 			break;
 #endif
 		default:
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e25120..84708a5 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -866,7 +866,7 @@
 }
 
 /* locks the driver */
-static int lock_fdc(int drive, bool interruptible)
+static int lock_fdc(int drive)
 {
 	if (WARN(atomic_read(&usage_count) == 0,
 		 "Trying to lock fdc while usage count=0\n"))
@@ -2173,7 +2173,7 @@
 {
 	int ret;
 
-	if (lock_fdc(drive, true))
+	if (lock_fdc(drive))
 		return -EINTR;
 
 	set_floppy(drive);
@@ -2960,7 +2960,7 @@
 {
 	int ret;
 
-	if (lock_fdc(drive, interruptible))
+	if (lock_fdc(drive))
 		return -EINTR;
 
 	if (arg == FD_RESET_ALWAYS)
@@ -3243,7 +3243,7 @@
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		mutex_lock(&open_lock);
-		if (lock_fdc(drive, true)) {
+		if (lock_fdc(drive)) {
 			mutex_unlock(&open_lock);
 			return -EINTR;
 		}
@@ -3263,7 +3263,7 @@
 	} else {
 		int oldStretch;
 
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 		if (cmd != FDDEFPRM) {
 			/* notice a disk change immediately, else
@@ -3349,7 +3349,7 @@
 	if (type)
 		*g = &floppy_type[type];
 	else {
-		if (lock_fdc(drive, false))
+		if (lock_fdc(drive))
 			return -EINTR;
 		if (poll_drive(false, 0) == -EINTR)
 			return -EINTR;
@@ -3433,7 +3433,7 @@
 		if (UDRS->fd_ref != 1)
 			/* somebody else has this drive open */
 			return -EBUSY;
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 
 		/* do the actual eject. Fails on
@@ -3445,7 +3445,7 @@
 		process_fd_request();
 		return ret;
 	case FDCLRPRM:
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 		current_type[drive] = NULL;
 		floppy_sizes[drive] = MAX_DISK_SIZE << 1;
@@ -3467,7 +3467,7 @@
 		UDP->flags &= ~FTD_MSG;
 		return 0;
 	case FDFMTBEG:
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 		if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
 			return -EINTR;
@@ -3484,7 +3484,7 @@
 		return do_format(drive, &inparam.f);
 	case FDFMTEND:
 	case FDFLUSH:
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 		return invalidate_drive(bdev);
 	case FDSETEMSGTRESH:
@@ -3507,7 +3507,7 @@
 		outparam = UDP;
 		break;
 	case FDPOLLDRVSTAT:
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 		if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
 			return -EINTR;
@@ -3530,7 +3530,7 @@
 	case FDRAWCMD:
 		if (type)
 			return -EINVAL;
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 		set_floppy(drive);
 		i = raw_cmd_ioctl(cmd, (void __user *)param);
@@ -3539,7 +3539,7 @@
 		process_fd_request();
 		return i;
 	case FDTWADDLE:
-		if (lock_fdc(drive, true))
+		if (lock_fdc(drive))
 			return -EINTR;
 		twaddle();
 		process_fd_request();
@@ -3663,6 +3663,11 @@
 
 	opened_bdev[drive] = bdev;
 
+	if (!(mode & (FMODE_READ|FMODE_WRITE))) {
+		res = -EINVAL;
+		goto out;
+	}
+
 	res = -ENXIO;
 
 	if (!floppy_track_buffer) {
@@ -3706,21 +3711,20 @@
 	if (UFDCS->rawcmd == 1)
 		UFDCS->rawcmd = 2;
 
-	if (!(mode & FMODE_NDELAY)) {
-		if (mode & (FMODE_READ|FMODE_WRITE)) {
-			UDRS->last_checked = 0;
-			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
-			check_disk_change(bdev);
-			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
-				goto out;
-			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
-				goto out;
-		}
-		res = -EROFS;
-		if ((mode & FMODE_WRITE) &&
-		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
-			goto out;
-	}
+	UDRS->last_checked = 0;
+	clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+	check_disk_change(bdev);
+	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+		goto out;
+	if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+		goto out;
+
+	res = -EROFS;
+
+	if ((mode & FMODE_WRITE) &&
+			!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+		goto out;
+
 	mutex_unlock(&open_lock);
 	mutex_unlock(&floppy_mutex);
 	return 0;
@@ -3748,7 +3752,8 @@
 		return DISK_EVENT_MEDIA_CHANGE;
 
 	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
-		lock_fdc(drive, false);
+		if (lock_fdc(drive))
+			return -EINTR;
 		poll_drive(false, 0);
 		process_fd_request();
 	}
@@ -3847,7 +3852,9 @@
 			 "VFS: revalidate called on non-open device.\n"))
 			return -EFAULT;
 
-		lock_fdc(drive, false);
+		res = lock_fdc(drive);
+		if (res)
+			return res;
 		cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 		      test_bit(FD_VERIFY_BIT, &UDRS->flags));
 		if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8ba1e97..64a7b59 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -478,7 +478,7 @@
 	id->ver_id = 0x1;
 	id->vmnt = 0;
 	id->cgrps = 1;
-	id->cap = 0x3;
+	id->cap = 0x2;
 	id->dom = 0x1;
 
 	id->ppaf.blk_offset = 0;
@@ -707,9 +707,7 @@
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
-
 	mutex_lock(&lock);
-	list_add_tail(&nullb->list, &nullb_list);
 	nullb->index = nullb_indexes++;
 	mutex_unlock(&lock);
 
@@ -743,6 +741,10 @@
 	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
 
 	add_disk(disk);
+
+	mutex_lock(&lock);
+	list_add_tail(&nullb->list, &nullb_list);
+	mutex_unlock(&lock);
 done:
 	return 0;
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8a8dc91..83eb9e6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1873,6 +1873,43 @@
 	return err;
 }
 
+static int negotiate_mq(struct blkfront_info *info)
+{
+	unsigned int backend_max_queues = 0;
+	int err;
+	unsigned int i;
+
+	BUG_ON(info->nr_rings);
+
+	/* Check if backend supports multiple queues. */
+	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+			   "multi-queue-max-queues", "%u", &backend_max_queues);
+	if (err < 0)
+		backend_max_queues = 1;
+
+	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
+	/* We need at least one ring. */
+	if (!info->nr_rings)
+		info->nr_rings = 1;
+
+	info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
+	if (!info->rinfo) {
+		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < info->nr_rings; i++) {
+		struct blkfront_ring_info *rinfo;
+
+		rinfo = &info->rinfo[i];
+		INIT_LIST_HEAD(&rinfo->indirect_pages);
+		INIT_LIST_HEAD(&rinfo->grants);
+		rinfo->dev_info = info;
+		INIT_WORK(&rinfo->work, blkif_restart_queue);
+		spin_lock_init(&rinfo->ring_lock);
+	}
+	return 0;
+}
 /**
  * Entry point to this code when a new device is created.  Allocate the basic
  * structures and the ring buffer for communication with the backend, and
@@ -1883,9 +1920,7 @@
 			  const struct xenbus_device_id *id)
 {
 	int err, vdevice;
-	unsigned int r_index;
 	struct blkfront_info *info;
-	unsigned int backend_max_queues = 0;
 
 	/* FIXME: Use dynamic device id if this is not set. */
 	err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1936,33 +1971,10 @@
 	}
 
 	info->xbdev = dev;
-	/* Check if backend supports multiple queues. */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "multi-queue-max-queues", "%u", &backend_max_queues);
-	if (err < 0)
-		backend_max_queues = 1;
-
-	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
-	/* We need at least one ring. */
-	if (!info->nr_rings)
-		info->nr_rings = 1;
-
-	info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
-	if (!info->rinfo) {
-		xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
+	err = negotiate_mq(info);
+	if (err) {
 		kfree(info);
-		return -ENOMEM;
-	}
-
-	for (r_index = 0; r_index < info->nr_rings; r_index++) {
-		struct blkfront_ring_info *rinfo;
-
-		rinfo = &info->rinfo[r_index];
-		INIT_LIST_HEAD(&rinfo->indirect_pages);
-		INIT_LIST_HEAD(&rinfo->grants);
-		rinfo->dev_info = info;
-		INIT_WORK(&rinfo->work, blkif_restart_queue);
-		spin_lock_init(&rinfo->ring_lock);
+		return err;
 	}
 
 	mutex_init(&info->mutex);
@@ -2123,12 +2135,16 @@
 static int blkfront_resume(struct xenbus_device *dev)
 {
 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
-	int err;
+	int err = 0;
 
 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
+	err = negotiate_mq(info);
+	if (err)
+		return err;
+
 	err = talk_to_blkback(dev, info);
 
 	/*
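
The negotiate_mq() helper factored out above boils down to a small clamping rule: take the queue count the backend advertises (defaulting to 1 if the xenstore read fails), cap it at the frontend's xen_blkif_max_queues limit, and never go below one ring. A stand-alone sketch of that rule follows; the names (negotiate_rings, backend_max, frontend_max) are illustrative, not the kernel code itself.

#include <stdio.h>

/* Illustrative only: mirrors the clamping negotiate_mq() performs. */
static unsigned int negotiate_rings(int xenstore_err,
				    unsigned int backend_max,
				    unsigned int frontend_max)
{
	unsigned int nr_rings;

	if (xenstore_err < 0)		/* backend did not advertise MQ */
		backend_max = 1;

	nr_rings = backend_max < frontend_max ? backend_max : frontend_max;
	if (!nr_rings)			/* always need at least one ring */
		nr_rings = 1;

	return nr_rings;
}

int main(void)
{
	printf("%u\n", negotiate_rings(0, 8, 4));	/* 4 */
	printf("%u\n", negotiate_rings(-1, 0, 4));	/* 1 */
	printf("%u\n", negotiate_rings(0, 0, 0));	/* 1 */
	return 0;
}

Once the count is settled, per-ring state is allocated and each ring gets its own lock, grant/indirect-page lists and restart work item, which is what the loop at the end of negotiate_mq() sets up for both probe and resume.
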
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 129d47b..9a92c07 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -132,7 +132,7 @@
 	  and AC100/AC200 ICs.
 
 config UNIPHIER_SYSTEM_BUS
-	bool "UniPhier System Bus driver"
+	tristate "UniPhier System Bus driver"
 	depends on ARCH_UNIPHIER && OF
 	default y
 	help
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index 6575c0f..c3cb76b 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -192,8 +192,10 @@
 	/* Need the config devices early, before the "normal" devices... */
 	for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
 		err = vexpress_config_populate(node);
-		if (err)
+		if (err) {
+			of_node_put(node);
 			break;
+		}
 	}
 
 	return err;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 240b6cf..be54e53 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -42,7 +42,7 @@
 /*
  * The High Precision Event Timer driver.
  * This driver is closely modelled after the rtc.c driver.
- * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
+ * See HPET spec revision 1.
  */
 #define	HPET_USER_FREQ	(64)
 #define	HPET_DRIFT	(500)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 9fda22e..7fddd86 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -68,6 +68,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/acpi.h>
 
 #ifdef CONFIG_PARISC
 #include <asm/hardware.h>	/* for register_parisc_driver() stuff */
@@ -2054,8 +2055,6 @@
 
 #ifdef CONFIG_ACPI
 
-#include <linux/acpi.h>
-
 /*
  * Once we get an ACPI failure, we don't try any more, because we go
  * through the tables sequentially.  Once we don't find a table, there
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b038e36..bae4be6 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,7 +43,7 @@
 obj-$(CONFIG_COMMON_CLK_SI570)		+= clk-si570.o
 obj-$(CONFIG_COMMON_CLK_CDCE925)	+= clk-cdce925.o
 obj-$(CONFIG_ARCH_STM32)		+= clk-stm32f4.o
-obj-$(CONFIG_ARCH_TANGOX)		+= clk-tango4.o
+obj-$(CONFIG_ARCH_TANGO)		+= clk-tango4.o
 obj-$(CONFIG_CLK_TWL6040)		+= clk-twl6040.o
 obj-$(CONFIG_ARCH_U300)			+= clk-u300.o
 obj-$(CONFIG_ARCH_VT8500)		+= clk-vt8500.o
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 19fed65..7b09a26 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -289,7 +289,7 @@
 
 	num_parents = of_clk_get_parent_count(node);
 	if (num_parents < 0)
-		return;
+		num_parents = 0;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f272..89e9ca7 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -299,7 +299,7 @@
 	/* Add the virtual cpufreq device */
 	cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
 						      -1, NULL, 0);
-	if (!cpufreq_dev)
+	if (IS_ERR(cpufreq_dev))
 		pr_warn("unable to register cpufreq device");
 
 	return 0;
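
The one-line fix above matters because platform_device_register_simple() reports failure with an ERR_PTR()-encoded pointer, never with NULL, so the old !cpufreq_dev test could not fire. A minimal user-space model of that convention; the ERR_PTR/IS_ERR helpers here are local stand-ins for <linux/err.h>, not the kernel's own definitions.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Local stand-ins: error codes live at the very top of the address
 * range, so they are distinguishable from both NULL and valid pointers. */
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Pretend registration that fails the way the real call does. */
static void *register_simple(void)
{
	return ERR_PTR(-ENODEV);
}

int main(void)
{
	void *dev = register_simple();

	if (!dev)
		puts("NULL check fired");
	else
		puts("NULL check saw nothing wrong (the old bug)");

	if (IS_ERR(dev))
		printf("IS_ERR check caught error %ld\n", PTR_ERR(dev));
	return 0;
}
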
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index d5c5bfa..3e0b52d 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -247,7 +247,7 @@
 
 void __init dove_divider_clk_init(struct device_node *np)
 {
-	void *base;
+	void __iomem *base;
 
 	base = of_iomap(np, 0);
 	if (WARN_ON(!base))
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index cf73e53..070037a 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3587,7 +3587,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x1fc0,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc gcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b692ae8..dd5402b 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3005,7 +3005,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x3e40,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc gcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f6a2b14..ad41303 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2702,7 +2702,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x363c,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc gcc_msm8660_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index e3bf09d..8cc9b28 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -3336,7 +3336,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x80000,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc gcc_msm8916_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f31111e..983dd7d 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3468,7 +3468,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x3660,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct regmap_config gcc_apq8064_regmap_config = {
@@ -3477,7 +3476,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x3880,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc gcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index df164d6..335952d 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2680,7 +2680,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x1fc0,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc gcc_msm8974_desc = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 62e79fa..db3998e 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -419,7 +419,6 @@
 	.val_bits	= 32,
 	.max_register	= 0xfc,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc lcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index bf95bb0..4fcf9d1 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -524,7 +524,6 @@
 	.val_bits	= 32,
 	.max_register	= 0xfc,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc lcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1e703fd..30777f9 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3368,7 +3368,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x5104,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc mmcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index d73a048..00e3619 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -3029,7 +3029,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x334,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct regmap_config mmcc_apq8064_regmap_config = {
@@ -3038,7 +3037,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x350,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc mmcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed..9d790bc 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2594,7 +2594,6 @@
 	.val_bits	= 32,
 	.max_register	= 0x5104,
 	.fast_io	= true,
-	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct qcom_cc_desc mmcc_msm8974_desc = {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index ebce980..bc7fbac 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -133,7 +133,7 @@
 PNAME(mux_uart0_p)	= { "uart0_src", "uart0_frac", "xin24m" };
 PNAME(mux_uart1_p)	= { "uart1_src", "uart1_frac", "xin24m" };
 PNAME(mux_uart2_p)	= { "uart2_src", "uart2_frac", "xin24m" };
-PNAME(mux_mac_p)	= { "mac_pll_src", "ext_gmac" };
+PNAME(mux_mac_p)	= { "mac_pll_src", "rmii_clkin" };
 PNAME(mux_dclk_p)	= { "dclk_lcdc", "dclk_cru" };
 
 static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = {
@@ -224,16 +224,16 @@
 			RK2928_CLKGATE_CON(2), 2, GFLAGS),
 
 	COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED,
-			RK2928_CLKSEL_CON(2), 4, 1, DFLAGS,
+			RK2928_CLKSEL_CON(2), 4, 1, MFLAGS,
 			RK2928_CLKGATE_CON(1), 0, GFLAGS),
 	COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED,
-			RK2928_CLKSEL_CON(2), 5, 1, DFLAGS,
+			RK2928_CLKSEL_CON(2), 5, 1, MFLAGS,
 			RK2928_CLKGATE_CON(1), 1, GFLAGS),
 	COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED,
-			RK2928_CLKSEL_CON(2), 6, 1, DFLAGS,
+			RK2928_CLKSEL_CON(2), 6, 1, MFLAGS,
 			RK2928_CLKGATE_CON(2), 4, GFLAGS),
 	COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED,
-			RK2928_CLKSEL_CON(2), 7, 1, DFLAGS,
+			RK2928_CLKSEL_CON(2), 7, 1, MFLAGS,
 			RK2928_CLKGATE_CON(2), 5, GFLAGS),
 
 	MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
@@ -242,11 +242,11 @@
 			RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
 			RK2928_CLKGATE_CON(1), 8, GFLAGS),
 	COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0,
-			RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
-			RK2928_CLKGATE_CON(1), 8, GFLAGS),
+			RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(1), 10, GFLAGS),
 	COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0,
-			RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
-			RK2928_CLKGATE_CON(1), 8, GFLAGS),
+			RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(1), 12, GFLAGS),
 	COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(17), 0,
 			RK2928_CLKGATE_CON(1), 9, GFLAGS,
@@ -279,13 +279,13 @@
 			RK2928_CLKGATE_CON(3), 2, GFLAGS),
 
 	COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0,
-			RK2928_CLKSEL_CON(12), 8, 2, DFLAGS,
+			RK2928_CLKSEL_CON(12), 8, 2, MFLAGS,
 			RK2928_CLKGATE_CON(2), 11, GFLAGS),
 	DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0,
 			RK2928_CLKSEL_CON(11), 0, 7, DFLAGS),
 
 	COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
-			RK2928_CLKSEL_CON(12), 10, 2, DFLAGS,
+			RK2928_CLKSEL_CON(12), 10, 2, MFLAGS,
 			RK2928_CLKGATE_CON(2), 13, GFLAGS),
 	DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
 			RK2928_CLKSEL_CON(11), 8, 7, DFLAGS),
@@ -344,12 +344,12 @@
 			RK2928_CLKGATE_CON(10), 5, GFLAGS),
 
 	COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
-			RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS),
+			RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
 	MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
 
 	COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
-			RK2928_CLKSEL_CON(21), 9, 5, DFLAGS,
+			RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
 			RK2928_CLKGATE_CON(2), 6, GFLAGS),
 
 	MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index be0ede5..21f3ea9 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -780,13 +780,13 @@
 	GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
 
 	/* pclk_pd_alive gates */
-	GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS),
-	GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS),
-	GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS),
-	GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS),
-	GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS),
-	GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS),
-	GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS),
+	GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS),
+	GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS),
+	GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS),
+	GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS),
+	GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS),
+	GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS),
+	GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS),
 
 	/*
 	 * pclk_vio gates
@@ -796,12 +796,12 @@
 	GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
 
 	/* pclk_pd_pmu gates */
-	GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS),
-	GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS),
-	GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS),
-	GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
-	GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS),
-	GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
+	GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
+	GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS),
+	GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS),
+	GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS),
+	GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS),
+	GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS),
 
 	/* timer gates */
 	GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index e1fe8f3..74e7544 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -450,8 +450,10 @@
 		struct emc_timing *timing = tegra->timings + (i++);
 
 		err = load_one_timing_from_dt(tegra, timing, child);
-		if (err)
+		if (err) {
+			of_node_put(child);
 			return err;
+		}
 
 		timing->ram_code = ram_code;
 	}
@@ -499,9 +501,9 @@
 		 * fuses until the apbmisc driver is loaded.
 		 */
 		err = load_timings_from_dt(tegra, node, node_ram_code);
+		of_node_put(node);
 		if (err)
 			return ERR_PTR(err);
-		of_node_put(node);
 		break;
 	}
 
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 19ce073..62ea381 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -11,6 +11,7 @@
 	tegra_clk_afi,
 	tegra_clk_amx,
 	tegra_clk_amx1,
+	tegra_clk_apb2ape,
 	tegra_clk_apbdma,
 	tegra_clk_apbif,
 	tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index a534bfa..6ac3f84 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -86,15 +86,21 @@
 #define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
 				PLLE_SS_CNTL_SSC_BYP)
 #define PLLE_SS_MAX_MASK 0x1ff
-#define PLLE_SS_MAX_VAL 0x25
+#define PLLE_SS_MAX_VAL_TEGRA114 0x25
+#define PLLE_SS_MAX_VAL_TEGRA210 0x21
 #define PLLE_SS_INC_MASK (0xff << 16)
 #define PLLE_SS_INC_VAL (0x1 << 16)
 #define PLLE_SS_INCINTRV_MASK (0x3f << 24)
-#define PLLE_SS_INCINTRV_VAL (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
 #define PLLE_SS_COEFFICIENTS_MASK \
 	(PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
-#define PLLE_SS_COEFFICIENTS_VAL \
-	(PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
+	(PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
+	 PLLE_SS_INCINTRV_VAL_TEGRA114)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
+	(PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
+	 PLLE_SS_INCINTRV_VAL_TEGRA210)
 
 #define PLLE_AUX_PLLP_SEL	BIT(2)
 #define PLLE_AUX_USE_LOCKDET	BIT(3)
@@ -880,7 +886,7 @@
 static int clk_plle_enable(struct clk_hw *hw)
 {
 	struct tegra_clk_pll *pll = to_clk_pll(hw);
-	unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+	unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 	struct tegra_clk_pll_freq_table sel;
 	u32 val;
 	int err;
@@ -1378,7 +1384,7 @@
 	u32 val;
 	int ret;
 	unsigned long flags = 0;
-	unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+	unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 
 	if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
 		return -EINVAL;
@@ -1401,7 +1407,7 @@
 	val |= PLLE_MISC_IDDQ_SW_CTRL;
 	val &= ~PLLE_MISC_IDDQ_SW_VALUE;
 	val |= PLLE_MISC_PLLE_PTS;
-	val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+	val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
 	pll_writel_misc(val, pll);
 	udelay(5);
 
@@ -1428,7 +1434,7 @@
 	val = pll_readl(PLLE_SS_CTRL, pll);
 	val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
 	val &= ~PLLE_SS_COEFFICIENTS_MASK;
-	val |= PLLE_SS_COEFFICIENTS_VAL;
+	val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
 	pll_writel(val, PLLE_SS_CTRL, pll);
 	val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
 	pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@
 	struct tegra_clk_pll *pll = to_clk_pll(hw);
 	struct tegra_clk_pll_freq_table sel;
 	u32 val;
-	int ret;
+	int ret = 0;
 	unsigned long flags = 0;
-	unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+	unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 
 	if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
 		return -EINVAL;
@@ -2022,22 +2028,20 @@
 	if (pll->lock)
 		spin_lock_irqsave(pll->lock, flags);
 
+	val = pll_readl(pll->params->aux_reg, pll);
+	if (val & PLLE_AUX_SEQ_ENABLE)
+		goto out;
+
 	val = pll_readl_base(pll);
 	val &= ~BIT(30); /* Disable lock override */
 	pll_writel_base(val, pll);
 
-	val = pll_readl(pll->params->aux_reg, pll);
-	val |= PLLE_AUX_ENABLE_SWCTL;
-	val &= ~PLLE_AUX_SEQ_ENABLE;
-	pll_writel(val, pll->params->aux_reg, pll);
-	udelay(1);
-
 	val = pll_readl_misc(pll);
 	val |= PLLE_MISC_LOCK_ENABLE;
 	val |= PLLE_MISC_IDDQ_SW_CTRL;
 	val &= ~PLLE_MISC_IDDQ_SW_VALUE;
 	val |= PLLE_MISC_PLLE_PTS;
-	val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+	val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
 	pll_writel_misc(val, pll);
 	udelay(5);
 
@@ -2067,7 +2071,7 @@
 	val = pll_readl(PLLE_SS_CTRL, pll);
 	val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
 	val &= ~PLLE_SS_COEFFICIENTS_MASK;
-	val |= PLLE_SS_COEFFICIENTS_VAL;
+	val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
 	pll_writel(val, PLLE_SS_CTRL, pll);
 	val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
 	pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@
 	if (pll->lock)
 		spin_lock_irqsave(pll->lock, flags);
 
+	/* If PLLE HW sequencer is enabled, SW should not disable PLLE */
+	val = pll_readl(pll->params->aux_reg, pll);
+	if (val & PLLE_AUX_SEQ_ENABLE)
+		goto out;
+
 	val = pll_readl_base(pll);
 	val &= ~PLLE_BASE_ENABLE;
 	pll_writel_base(val, pll);
 
+	val = pll_readl(pll->params->aux_reg, pll);
+	val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
+	pll_writel(val, pll->params->aux_reg, pll);
+
 	val = pll_readl_misc(pll);
 	val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
 	pll_writel_misc(val, pll);
 	udelay(1);
 
+out:
 	if (pll->lock)
 		spin_unlock_irqrestore(pll->lock, flags);
 }
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 6ad381a..ea2b9cbf 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -773,7 +773,7 @@
 	XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
 	XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
 	MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
-	MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
+	MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
 	MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
 	MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
 	MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@
 	NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
 	MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
 	MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
-	MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c),
+	I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
 	MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
 	MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
 	MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@
 	GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
 	GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
 	GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
+	GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
 };
 
 static struct tegra_periph_init_data div_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4559a20..474de0f 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -67,7 +67,7 @@
 					 "pll_p", "pll_p_out4", "unused",
 					 "unused", "pll_x", "pll_x_out0" };
 
-const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
 	.gen = gen4,
 	.sclk_parents = sclk_parents,
 	.cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@
 					"unused", "unused", "unused", "unused",
 					"dfllCPU_out" };
 
-const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
 	.gen = gen5,
 	.sclk_parents = sclk_parents_gen5,
 	.cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@
 	*dt_clk = clk;
 }
 
-void __init tegra_super_clk_init(void __iomem *clk_base,
+static void __init tegra_super_clk_init(void __iomem *clk_base,
 				void __iomem *pmc_base,
 				struct tegra_clk *tegra_clks,
 				struct tegra_clk_pll_params *params,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 58514c4..637041f 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -59,8 +59,8 @@
 #define PLLC3_MISC3 0x50c
 
 #define PLLM_BASE 0x90
-#define PLLM_MISC0 0x9c
 #define PLLM_MISC1 0x98
+#define PLLM_MISC2 0x9c
 #define PLLP_BASE 0xa0
 #define PLLP_MISC0 0xac
 #define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
 #define PLLC4_MISC0 0x5a8
 #define PLLC4_OUT 0x5e4
 #define PLLMB_BASE 0x5e8
-#define PLLMB_MISC0 0x5ec
+#define PLLMB_MISC1 0x5ec
 #define PLLA1_BASE 0x6a4
 #define PLLA1_MISC0 0x6a8
 #define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@
 };
 
 static const char *mux_pllmcp_clkm[] = {
-	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
+	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
+	"pll_p",
 };
 #define mux_pllmcp_clkm_idx NULL
 
@@ -367,12 +368,12 @@
 /* PLLMB */
 #define PLLMB_BASE_LOCK			(1 << 27)
 
-#define PLLMB_MISC0_LOCK_OVERRIDE	(1 << 18)
-#define PLLMB_MISC0_IDDQ		(1 << 17)
-#define PLLMB_MISC0_LOCK_ENABLE		(1 << 16)
+#define PLLMB_MISC1_LOCK_OVERRIDE	(1 << 18)
+#define PLLMB_MISC1_IDDQ		(1 << 17)
+#define PLLMB_MISC1_LOCK_ENABLE		(1 << 16)
 
-#define PLLMB_MISC0_DEFAULT_VALUE	0x00030000
-#define PLLMB_MISC0_WRITE_MASK		0x0007ffff
+#define PLLMB_MISC1_DEFAULT_VALUE	0x00030000
+#define PLLMB_MISC1_WRITE_MASK		0x0007ffff
 
 /* PLLP */
 #define PLLP_BASE_OVERRIDE		(1 << 28)
@@ -457,7 +458,8 @@
 			PLLCX_MISC3_WRITE_MASK);
 }
 
-void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
+static void tegra210_pllcx_set_defaults(const char *name,
+					struct tegra_clk_pll *pllcx)
 {
 	pllcx->params->defaults_set = true;
 
@@ -482,22 +484,22 @@
 	udelay(1);
 }
 
-void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
 {
 	tegra210_pllcx_set_defaults("PLL_C", pllcx);
 }
 
-void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
 {
 	tegra210_pllcx_set_defaults("PLL_C2", pllcx);
 }
 
-void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
 {
 	tegra210_pllcx_set_defaults("PLL_C3", pllcx);
 }
 
-void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
+static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
 {
 	tegra210_pllcx_set_defaults("PLL_A1", pllcx);
 }
@@ -507,7 +509,7 @@
  * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
  * Fractional SDM is allowed to provide exact audio rates.
  */
-void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
+static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
 {
 	u32 mask;
 	u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@
  * PLLD
  * PLL with fractional SDM.
  */
-void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
+static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
 {
 	u32 val;
 	u32 mask = 0xffff;
@@ -698,7 +700,7 @@
 	udelay(1);
 }
 
-void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
+static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
 {
 	plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
 			PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@
 			PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
 }
 
-void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
+static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
 {
 	plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
 			PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@
  * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
  * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
  */
-void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
+static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
 {
 	plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
 }
@@ -728,7 +730,7 @@
  * PLLRE
  * VCO is exposed to the clock tree directly along with post-divider output
  */
-void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
+static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
 {
 	u32 mask;
 	u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@
 {
 	unsigned long input_rate;
 
-	if (!IS_ERR_OR_NULL(hw->clk)) {
+	/* cf rate */
+	if (!IS_ERR_OR_NULL(hw->clk))
 		input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
-		/* cf rate */
-		input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
-	} else {
+	else
 		input_rate = 38400000;
-	}
+
+	input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
 
 	switch (input_rate) {
 	case 12000000:
@@ -841,7 +843,7 @@
 			PLLX_MISC5_WRITE_MASK);
 }
 
-void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
+static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
 {
 	u32 val;
 	u32 step_a, step_b;
@@ -901,7 +903,7 @@
 }
 
 /* PLLMB */
-void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
+static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
 {
 	u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
 
@@ -914,15 +916,15 @@
 		 * PLL is ON: check if defaults already set, then set those
 		 * that can be updated in flight.
 		 */
-		val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ);
-		mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE;
+		val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
+		mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
 		_pll_misc_chk_default(clk_base, pllmb->params, 0, val,
-				~mask & PLLMB_MISC0_WRITE_MASK);
+				~mask & PLLMB_MISC1_WRITE_MASK);
 
 		/* Enable lock detect */
 		val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
 		val &= ~mask;
-		val |= PLLMB_MISC0_DEFAULT_VALUE & mask;
+		val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
 		writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
 		udelay(1);
 
@@ -930,7 +932,7 @@
 	}
 
 	/* set IDDQ, enable lock detect */
-	writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE,
+	writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
 			clk_base + pllmb->params->ext_misc_reg[0]);
 	udelay(1);
 }
@@ -960,7 +962,7 @@
 			~mask & PLLP_MISC1_WRITE_MASK);
 }
 
-void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
+static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
 {
 	u32 mask;
 	u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@
 			~mask & PLLU_MISC1_WRITE_MASK);
 }
 
-void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
+static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
 {
 	u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
 
@@ -1212,8 +1214,9 @@
 	cfg->m *= PLL_SDM_COEFF;
 }
 
-unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
-					  unsigned long parent_rate)
+static unsigned long
+tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
+			    unsigned long parent_rate)
 {
 	unsigned long vco_min = params->vco_min;
 
@@ -1386,7 +1389,7 @@
 	.mdiv_default = 3,
 	.div_nmp = &pllc_nmp,
 	.freq_table = pll_cx_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.set_defaults = _pllc_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1425,7 +1428,7 @@
 	.ext_misc_reg[2] = PLLC2_MISC2,
 	.ext_misc_reg[3] = PLLC2_MISC3,
 	.freq_table = pll_cx_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.set_defaults = _pllc2_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1455,7 +1458,7 @@
 	.ext_misc_reg[2] = PLLC3_MISC2,
 	.ext_misc_reg[3] = PLLC3_MISC3,
 	.freq_table = pll_cx_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.set_defaults = _pllc3_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1505,7 +1508,6 @@
 	.base_reg = PLLC4_BASE,
 	.misc_reg = PLLC4_MISC0,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
 	.max_p = PLL_QLIN_PDIV_MAX,
 	.ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@
 	.div_nmp = &pllss_nmp,
 	.freq_table = pll_c4_vco_freq_table,
 	.set_defaults = tegra210_pllc4_set_defaults,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
-		 TEGRA_PLL_VCO_OUT,
+	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
 
@@ -1559,15 +1560,15 @@
 	.vco_min = 800000000,
 	.vco_max = 1866000000,
 	.base_reg = PLLM_BASE,
-	.misc_reg = PLLM_MISC1,
+	.misc_reg = PLLM_MISC2,
 	.lock_mask = PLL_BASE_LOCK,
 	.lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
-	.iddq_reg = PLLM_MISC0,
+	.iddq_reg = PLLM_MISC2,
 	.iddq_bit_idx = PLLM_IDDQ_BIT,
 	.max_p = PLL_QLIN_PDIV_MAX,
-	.ext_misc_reg[0] = PLLM_MISC0,
-	.ext_misc_reg[0] = PLLM_MISC1,
+	.ext_misc_reg[0] = PLLM_MISC2,
+	.ext_misc_reg[1] = PLLM_MISC1,
 	.round_p_to_pdiv = pll_qlin_p_to_pdiv,
 	.pdiv_tohw = pll_qlin_pdiv_to_hw,
 	.div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@
 	.vco_min = 800000000,
 	.vco_max = 1866000000,
 	.base_reg = PLLMB_BASE,
-	.misc_reg = PLLMB_MISC0,
+	.misc_reg = PLLMB_MISC1,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
-	.iddq_reg = PLLMB_MISC0,
+	.iddq_reg = PLLMB_MISC1,
 	.iddq_bit_idx = PLLMB_IDDQ_BIT,
 	.max_p = PLL_QLIN_PDIV_MAX,
-	.ext_misc_reg[0] = PLLMB_MISC0,
+	.ext_misc_reg[0] = PLLMB_MISC1,
 	.round_p_to_pdiv = pll_qlin_p_to_pdiv,
 	.pdiv_tohw = pll_qlin_pdiv_to_hw,
 	.div_nmp = &pllm_nmp,
 	.freq_table = pll_m_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.set_defaults = tegra210_pllmb_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1671,7 +1671,6 @@
 	.base_reg = PLLRE_BASE,
 	.misc_reg = PLLRE_MISC0,
 	.lock_mask = PLLRE_MISC_LOCK,
-	.lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
 	.max_p = PLL_QLIN_PDIV_MAX,
 	.ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@
 	.pdiv_tohw = pll_qlin_pdiv_to_hw,
 	.div_nmp = &pllre_nmp,
 	.freq_table = pll_re_vco_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC |
-		 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
 	.set_defaults = tegra210_pllre_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1712,7 +1710,6 @@
 	.base_reg = PLLP_BASE,
 	.misc_reg = PLLP_MISC0,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
 	.iddq_reg = PLLP_MISC0,
 	.iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@
 	.div_nmp = &pllp_nmp,
 	.freq_table = pll_p_freq_table,
 	.fixed_rate = 408000000,
-	.flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK |
-		 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+	.flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
 	.set_defaults = tegra210_pllp_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1750,7 +1746,7 @@
 	.ext_misc_reg[2] = PLLA1_MISC2,
 	.ext_misc_reg[3] = PLLA1_MISC3,
 	.freq_table = pll_cx_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.set_defaults = _plla1_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1787,7 +1783,6 @@
 	.base_reg = PLLA_BASE,
 	.misc_reg = PLLA_MISC0,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
 	.round_p_to_pdiv = pll_qlin_p_to_pdiv,
 	.pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@
 	.ext_misc_reg[1] = PLLA_MISC1,
 	.ext_misc_reg[2] = PLLA_MISC2,
 	.freq_table = pll_a_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW |
-		 TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
 	.set_defaults = tegra210_plla_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 	.set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@
 	.base_reg = PLLD_BASE,
 	.misc_reg = PLLD_MISC0,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
 	.lock_delay = 1000,
 	.iddq_reg = PLLD_MISC0,
 	.iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@
 	.ext_misc_reg[0] = PLLD_MISC0,
 	.ext_misc_reg[1] = PLLD_MISC1,
 	.freq_table = pll_d_freq_table,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.mdiv_default = 1,
 	.set_defaults = tegra210_plld_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@
 	.base_reg = PLLD2_BASE,
 	.misc_reg = PLLD2_MISC0,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
 	.iddq_reg = PLLD2_BASE,
 	.iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@
 	.mdiv_default = 1,
 	.freq_table = tegra210_pll_d2_freq_table,
 	.set_defaults = tegra210_plld2_set_defaults,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 	.set_gain = tegra210_clk_pll_set_gain,
 	.adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@
 	.base_reg = PLLDP_BASE,
 	.misc_reg = PLLDP_MISC,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
 	.lock_delay = 300,
 	.iddq_reg = PLLDP_BASE,
 	.iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@
 	.mdiv_default = 1,
 	.freq_table = pll_dp_freq_table,
 	.set_defaults = tegra210_plldp_set_defaults,
-	.flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+	.flags = TEGRA_PLL_USE_LOCK,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 	.set_gain = tegra210_clk_pll_set_gain,
 	.adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@
 	.base_reg = PLLU_BASE,
 	.misc_reg = PLLU_MISC0,
 	.lock_mask = PLL_BASE_LOCK,
-	.lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
 	.lock_delay = 1000,
 	.iddq_reg = PLLU_MISC0,
 	.iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@
 	.pdiv_tohw = pll_qlin_pdiv_to_hw,
 	.div_nmp = &pllu_nmp,
 	.freq_table = pll_u_freq_table,
-	.flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
-		 TEGRA_PLL_VCO_OUT,
+	.flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
 	.set_defaults = tegra210_pllu_set_defaults,
 	.calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -2218,6 +2207,7 @@
 	[tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
 	[tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
 	[tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
+	[tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@
 
 	/* PLLU_VCO */
 	val = readl(clk_base + pll_u_vco_params.base_reg);
-	val &= ~BIT(24); /* disable PLLU_OVERRIDE */
+	val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
 	writel(val, clk_base + pll_u_vco_params.base_reg);
 
 	clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@
 	{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
 	{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
 	{ TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
-	{ TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
-	{ TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
 	{ TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
 	{ TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
 	{ TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index e62f8cb..3bca438 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -78,6 +78,9 @@
 	ret = regmap_read(icst->map, icst->vcoreg_off, &val);
 	if (ret)
 		return ret;
+
+	/* Mask the 18 bits used by the VCO */
+	val &= ~0x7ffff;
 	val |= vco.v | (vco.r << 9) | (vco.s << 16);
 
 	/* This magic unlocks the VCO so it can be controlled */
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 20de861..8bf9914 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -782,7 +782,7 @@
 	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
 			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
 
-	clk_disable_unprepare(dd->iclk);
+	clk_disable(dd->iclk);
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
@@ -795,7 +795,7 @@
 {
 	int err;
 
-	err = clk_prepare_enable(dd->iclk);
+	err = clk_enable(dd->iclk);
 	if (err)
 		return err;
 
@@ -822,7 +822,7 @@
 	dev_info(dd->dev,
 			"version: 0x%x\n", dd->hw_version);
 
-	clk_disable_unprepare(dd->iclk);
+	clk_disable(dd->iclk);
 }
 
 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1410,6 +1410,10 @@
 		goto res_err;
 	}
 
+	err = clk_prepare(sha_dd->iclk);
+	if (err)
+		goto res_err;
+
 	atmel_sha_hw_version_init(sha_dd);
 
 	atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1425,12 @@
 			if (IS_ERR(pdata)) {
 				dev_err(&pdev->dev, "platform data not available\n");
 				err = PTR_ERR(pdata);
-				goto res_err;
+				goto iclk_unprepare;
 			}
 		}
 		if (!pdata->dma_slave) {
 			err = -ENXIO;
-			goto res_err;
+			goto iclk_unprepare;
 		}
 		err = atmel_sha_dma_init(sha_dd, pdata);
 		if (err)
@@ -1457,6 +1461,8 @@
 	if (sha_dd->caps.has_dma)
 		atmel_sha_dma_cleanup(sha_dd);
 err_sha_dma:
+iclk_unprepare:
+	clk_unprepare(sha_dd->iclk);
 res_err:
 	tasklet_kill(&sha_dd->done_task);
 sha_dd_err:
@@ -1483,12 +1489,7 @@
 	if (sha_dd->caps.has_dma)
 		atmel_sha_dma_cleanup(sha_dd);
 
-	iounmap(sha_dd->io_base);
-
-	clk_put(sha_dd->iclk);
-
-	if (sha_dd->irq >= 0)
-		free_irq(sha_dd->irq, sha_dd);
+	clk_unprepare(sha_dd->iclk);
 
 	return 0;
 }
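
The reshuffle above moves the sleepable half of the clock handling (clk_prepare()/clk_unprepare()) to probe/remove and leaves only clk_enable()/clk_disable() in the request path, which may run in atomic context. A stand-alone toy model of that split; these are local stubs for illustration, not the kernel clk API.

#include <stdio.h>
#include <assert.h>

struct clk { int prepared; int enable_count; };

/* Toy stubs: prepare() stands for the part that may sleep, enable()
 * for the cheap gate toggle that is safe in atomic context. */
static int clk_prepare(struct clk *c)    { c->prepared = 1; return 0; }
static void clk_unprepare(struct clk *c) { c->prepared = 0; }

static int clk_enable(struct clk *c)
{
	assert(c->prepared);	/* enabling an unprepared clock is a bug */
	c->enable_count++;
	return 0;
}

static void clk_disable(struct clk *c) { c->enable_count--; }

int main(void)
{
	struct clk iclk = { 0, 0 };

	clk_prepare(&iclk);		/* done once, at probe time */

	clk_enable(&iclk);		/* around each hash request */
	/* ... feed data to the SHA engine ... */
	clk_disable(&iclk);

	clk_unprepare(&iclk);		/* done once, at remove time */

	printf("enable_count=%d prepared=%d\n",
	       iclk.enable_count, iclk.prepared);
	return 0;
}
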
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 0643e33..c0656e7 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -306,7 +306,7 @@
 		return -ENOMEM;
 
 	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
-	if (!dma->cache_pool)
+	if (!dma->padding_pool)
 		return -ENOMEM;
 
 	cesa->dma = dma;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318..5ad0ec1 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@
 
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
 	dwc->initialized = true;
@@ -588,6 +587,9 @@
 
 		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
+
+	/* Re-enable interrupts */
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 }
 
 /* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@
 			dwc_scan_descriptors(dw, dwc);
 	}
 
-	/*
-	 * Re-enable interrupts.
-	 */
+	/* Re-enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -1261,6 +1260,7 @@
 int dw_dma_cyclic_start(struct dma_chan *chan)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
 	unsigned long		flags;
 
 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@
 	}
 
 	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* Enable interrupts to perform cyclic transfer */
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+
 	dwc_dostart(dwc, dwc->cdesc->desc[0]);
+
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return 0;
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd..358f968 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@
 
 	/* Haswell */
 	{ PCI_VDEVICE(INTEL, 0x9c60) },
+
+	/* Broadwell */
+	{ PCI_VDEVICE(INTEL, 0x9ce0) },
+
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d655..e3d7fcb 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
 #define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
 #define CHMAP_EXIST		BIT(24)
 
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV	BIT(4)
+
 /*
  * Max of 20 segments per channel to conserve PaRAM slots
  * Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for proof
+ * that a particular transfer is completed. This limit can be hit if there
+ * are large bursts to/from slow devices or the CPU is never able to catch
+ * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
+ * RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
 static u32 edma_residue(struct edma_desc *edesc)
 {
 	bool dst = edesc->direction == DMA_DEV_TO_MEM;
+	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+	struct edma_chan *echan = edesc->echan;
 	struct edma_pset *pset = edesc->pset;
 	dma_addr_t done, pos;
 	int i;
@@ -1691,7 +1705,32 @@
 	 * We always read the dst/src position from the first RamPar
 	 * pset. That's the one which is active now.
 	 */
-	pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
+	pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+	/*
+	 * "pos" may represent a transfer request that is still being
+	 * processed by the EDMACC or EDMATC. We will busy wait until
+	 * any one of the situations occurs:
+	 *   1. the DMA hardware is idle
+	 *   2. a new transfer request is setup
+	 *   3. we hit the loop limit
+	 */
+	while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+		/* check if a new transfer request is setup */
+		if (edma_get_position(echan->ecc,
+				      echan->slot[0], dst) != pos) {
+			break;
+		}
+
+		if (!--loop_count) {
+			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+				"%s: timeout waiting for PaRAM update\n",
+				__func__);
+			break;
+		}
+
+		cpu_relax();
+	}
 
 	/*
 	 * Cyclic is simple. Just subtract pset[0].addr from pos.
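
The EDMA_MAX_TR_WAIT_LOOPS bound added above turns a potentially unbounded poll into a best-effort one: spin while the controller reports activity, but give up as soon as the position moves on to a new transfer or the loop cap trips. A user-space sketch of that pattern, with simulated hardware reads standing in for the CCSTAT and PaRAM accesses.

#include <stdio.h>

#define MAX_WAIT_LOOPS 1000

/* Simulated hardware: "busy" for a few polls, position never moves. */
static int busy_polls = 5;
static int hw_busy(void)              { return busy_polls-- > 0; }
static unsigned int hw_position(void) { return 0x1000; }

static unsigned int read_stable_position(void)
{
	int loops = MAX_WAIT_LOOPS;
	unsigned int pos = hw_position();

	/* Wait until the engine idles, a new transfer is set up
	 * (position changes), or the bounded loop gives up. */
	while (hw_busy()) {
		if (hw_position() != pos)
			break;
		if (!--loops) {
			fprintf(stderr, "timeout waiting for update\n");
			break;
		}
	}
	return pos;
}

int main(void)
{
	printf("position: %#x\n", read_stable_position());
	return 0;
}
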
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2e..21539d5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@
 			return;
 	}
 
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+
+	/* handle the no-actives case */
+	if (!ioat_ring_active(ioat_chan)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		check_active(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		return;
+	}
+
 	/* if we haven't made progress and we have already
 	 * acknowledged a pending completion once, then be more
 	 * forceful with a restart
 	 */
-	spin_lock_bh(&ioat_chan->cleanup_lock);
 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
 		__cleanup(ioat_chan, phys_complete);
 	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+		u32 chanerr;
+
+		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
+		dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+			 status, chanerr);
+		dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
+			 ioat_ring_active(ioat_chan));
+
 		spin_lock_bh(&ioat_chan->prep_lock);
 		ioat_restart_channel(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
 		spin_unlock_bh(&ioat_chan->cleanup_lock);
 		return;
-	} else {
+	} else
 		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
-		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
 
-
-	if (ioat_ring_active(ioat_chan))
-		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat_chan->prep_lock);
-		check_active(ioat_chan);
-		spin_unlock_bh(&ioat_chan->prep_lock);
-	}
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index c5b0d2b..b23a271c 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -221,7 +221,7 @@
 	}
 
 	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
-	    efivar_validate(name, data, size) == false) {
+	    efivar_validate(vendor, name, data, size) == false) {
 		printk(KERN_ERR "efivars: Malformed variable content\n");
 		return -EINVAL;
 	}
@@ -447,7 +447,8 @@
 	}
 
 	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
-	    efivar_validate(name, data, size) == false) {
+	    efivar_validate(new_var->VendorGuid, name, data,
+			    size) == false) {
 		printk(KERN_ERR "efivars: Malformed variable content\n");
 		return -EINVAL;
 	}
@@ -540,38 +541,30 @@
 static int
 efivar_create_sysfs_entry(struct efivar_entry *new_var)
 {
-	int i, short_name_size;
+	int short_name_size;
 	char *short_name;
-	unsigned long variable_name_size;
-	efi_char16_t *variable_name;
+	unsigned long utf8_name_size;
+	efi_char16_t *variable_name = new_var->var.VariableName;
 	int ret;
 
-	variable_name = new_var->var.VariableName;
-	variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
-
 	/*
-	 * Length of the variable bytes in ASCII, plus the '-' separator,
+	 * Length of the variable bytes in UTF8, plus the '-' separator,
 	 * plus the GUID, plus trailing NUL
 	 */
-	short_name_size = variable_name_size / sizeof(efi_char16_t)
-				+ 1 + EFI_VARIABLE_GUID_LEN + 1;
+	utf8_name_size = ucs2_utf8size(variable_name);
+	short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
 
-	short_name = kzalloc(short_name_size, GFP_KERNEL);
-
+	short_name = kmalloc(short_name_size, GFP_KERNEL);
 	if (!short_name)
 		return -ENOMEM;
 
-	/* Convert Unicode to normal chars (assume top bits are 0),
-	   ala UTF-8 */
-	for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
-		short_name[i] = variable_name[i] & 0xFF;
-	}
+	ucs2_as_utf8(short_name, variable_name, short_name_size);
+
 	/* This is ugly, but necessary to separate one vendor's
 	   private variables from another's.         */
-
-	*(short_name + strlen(short_name)) = '-';
+	short_name[utf8_name_size] = '-';
 	efi_guid_to_str(&new_var->var.VendorGuid,
-			 short_name + strlen(short_name));
+			 short_name + utf8_name_size + 1);
 
 	new_var->kobj.kset = efivars_kset;
 
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index d2a4962..0ac594c 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -165,67 +165,133 @@
 }
 
 struct variable_validate {
+	efi_guid_t vendor;
 	char *name;
 	bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
 			 unsigned long len);
 };
 
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine.  If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
 static const struct variable_validate variable_validate[] = {
-	{ "BootNext", validate_uint16 },
-	{ "BootOrder", validate_boot_order },
-	{ "DriverOrder", validate_boot_order },
-	{ "Boot*", validate_load_option },
-	{ "Driver*", validate_load_option },
-	{ "ConIn", validate_device_path },
-	{ "ConInDev", validate_device_path },
-	{ "ConOut", validate_device_path },
-	{ "ConOutDev", validate_device_path },
-	{ "ErrOut", validate_device_path },
-	{ "ErrOutDev", validate_device_path },
-	{ "Timeout", validate_uint16 },
-	{ "Lang", validate_ascii_string },
-	{ "PlatformLang", validate_ascii_string },
-	{ "", NULL },
+	{ EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+	{ EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+	{ EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+	{ EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+	{ EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+	{ LINUX_EFI_CRASH_GUID, "*", NULL },
+	{ NULL_GUID, "", NULL },
 };
 
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+		 int *match)
+{
+	for (*match = 0; ; (*match)++) {
+		char c = match_name[*match];
+		char u = var_name[*match];
+
+		/* Wildcard in the matching name means we've matched */
+		if (c == '*')
+			return true;
+
+		/* Case sensitive match */
+		if (!c && *match == len)
+			return true;
+
+		if (c != u)
+			return false;
+
+		if (!c)
+			return true;
+	}
+	return true;
+}
+
 bool
-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+		unsigned long data_size)
 {
 	int i;
-	u16 *unicode_name = var_name;
+	unsigned long utf8_size;
+	u8 *utf8_name;
 
-	for (i = 0; variable_validate[i].validate != NULL; i++) {
+	utf8_size = ucs2_utf8size(var_name);
+	utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+	if (!utf8_name)
+		return false;
+
+	ucs2_as_utf8(utf8_name, var_name, utf8_size);
+	utf8_name[utf8_size] = '\0';
+
+	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
 		const char *name = variable_validate[i].name;
-		int match;
+		int match = 0;
 
-		for (match = 0; ; match++) {
-			char c = name[match];
-			u16 u = unicode_name[match];
+		if (efi_guidcmp(vendor, variable_validate[i].vendor))
+			continue;
 
-			/* All special variables are plain ascii */
-			if (u > 127)
-				return true;
-
-			/* Wildcard in the matching name means we've matched */
-			if (c == '*')
-				return variable_validate[i].validate(var_name,
-							     match, data, len);
-
-			/* Case sensitive match */
-			if (c != u)
+		if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+			if (variable_validate[i].validate == NULL)
 				break;
-
-			/* Reached the end of the string while matching */
-			if (!c)
-				return variable_validate[i].validate(var_name,
-							     match, data, len);
+			kfree(utf8_name);
+			return variable_validate[i].validate(var_name, match,
+							     data, data_size);
 		}
 	}
-
+	kfree(utf8_name);
 	return true;
 }
 EXPORT_SYMBOL_GPL(efivar_validate);
 
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+			     size_t len)
+{
+	int i;
+	bool found = false;
+	int match = 0;
+
+	/*
+	 * Check if our variable is in the validated variables list
+	 */
+	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+		if (efi_guidcmp(variable_validate[i].vendor, vendor))
+			continue;
+
+		if (variable_matches(var_name, len,
+				     variable_validate[i].name, &match)) {
+			found = true;
+			break;
+		}
+	}
+
+	/*
+	 * If it's in our list, it is removable.
+	 */
+	return found;
+}
+EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+
 static efi_status_t
 check_var_size(u32 attributes, unsigned long size)
 {
@@ -864,7 +930,7 @@
 
 	*set = false;
 
-	if (efivar_validate(name, data, *size) == false)
+	if (efivar_validate(*vendor, name, data, *size) == false)
 		return -EINVAL;
 
 	/*
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 2aeaebd..3f87a03 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -312,8 +312,8 @@
 		handle_simple_irq, IRQ_TYPE_NONE);
 
 	if (ret) {
-		dev_info(&pdev->dev, "could not add irqchip\n");
-		return ret;
+		dev_err(&pdev->dev, "could not add irqchip\n");
+		goto teardown;
 	}
 
 	gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
@@ -326,6 +326,7 @@
 skip_irq:
 	return 0;
 teardown:
+	of_mm_gpiochip_remove(&altera_gc->mmchip);
 	pr_err("%s: registration failed with status %d\n",
 		node->full_name, ret);
 
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index ec58f42..cd007a6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -195,7 +195,7 @@
 static int davinci_gpio_probe(struct platform_device *pdev)
 {
 	int i, base;
-	unsigned ngpio;
+	unsigned ngpio, nbank;
 	struct davinci_gpio_controller *chips;
 	struct davinci_gpio_platform_data *pdata;
 	struct davinci_gpio_regs __iomem *regs;
@@ -224,8 +224,9 @@
 	if (WARN_ON(ARCH_NR_GPIOS < ngpio))
 		ngpio = ARCH_NR_GPIOS;
 
+	nbank = DIV_ROUND_UP(ngpio, 32);
 	chips = devm_kzalloc(dev,
-			     ngpio * sizeof(struct davinci_gpio_controller),
+			     nbank * sizeof(struct davinci_gpio_controller),
 			     GFP_KERNEL);
 	if (!chips)
 		return -ENOMEM;
@@ -511,7 +512,7 @@
 			return irq;
 		}
 
-		irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0,
+		irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
 							&davinci_gpio_irq_ops,
 							chips);
 		if (!irq_domain) {
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 66f729e..20c9539 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
 # add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
 	amdgpu_amdkfd_gfx_v7.o
 
@@ -34,6 +34,7 @@
 
 # add GMC block
 amdgpu-y += \
+	gmc_v7_0.o \
 	gmc_v8_0.o
 
 # add IH block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 82edf95..5e7770f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -87,6 +87,8 @@
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_enable_semaphores;
 extern int amdgpu_powerplay;
+extern unsigned amdgpu_pcie_gen_cap;
+extern unsigned amdgpu_pcie_lane_cap;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -132,47 +134,6 @@
 #define AMDGPU_RESET_VCE			(1 << 13)
 #define AMDGPU_RESET_VCE1			(1 << 14)
 
-/* CG block flags */
-#define AMDGPU_CG_BLOCK_GFX			(1 << 0)
-#define AMDGPU_CG_BLOCK_MC			(1 << 1)
-#define AMDGPU_CG_BLOCK_SDMA			(1 << 2)
-#define AMDGPU_CG_BLOCK_UVD			(1 << 3)
-#define AMDGPU_CG_BLOCK_VCE			(1 << 4)
-#define AMDGPU_CG_BLOCK_HDP			(1 << 5)
-#define AMDGPU_CG_BLOCK_BIF			(1 << 6)
-
-/* CG flags */
-#define AMDGPU_CG_SUPPORT_GFX_MGCG		(1 << 0)
-#define AMDGPU_CG_SUPPORT_GFX_MGLS		(1 << 1)
-#define AMDGPU_CG_SUPPORT_GFX_CGCG		(1 << 2)
-#define AMDGPU_CG_SUPPORT_GFX_CGLS		(1 << 3)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS		(1 << 4)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
-#define AMDGPU_CG_SUPPORT_GFX_CP_LS		(1 << 6)
-#define AMDGPU_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
-#define AMDGPU_CG_SUPPORT_MC_LS			(1 << 8)
-#define AMDGPU_CG_SUPPORT_MC_MGCG		(1 << 9)
-#define AMDGPU_CG_SUPPORT_SDMA_LS		(1 << 10)
-#define AMDGPU_CG_SUPPORT_SDMA_MGCG		(1 << 11)
-#define AMDGPU_CG_SUPPORT_BIF_LS		(1 << 12)
-#define AMDGPU_CG_SUPPORT_UVD_MGCG		(1 << 13)
-#define AMDGPU_CG_SUPPORT_VCE_MGCG		(1 << 14)
-#define AMDGPU_CG_SUPPORT_HDP_LS		(1 << 15)
-#define AMDGPU_CG_SUPPORT_HDP_MGCG		(1 << 16)
-
-/* PG flags */
-#define AMDGPU_PG_SUPPORT_GFX_PG		(1 << 0)
-#define AMDGPU_PG_SUPPORT_GFX_SMG		(1 << 1)
-#define AMDGPU_PG_SUPPORT_GFX_DMG		(1 << 2)
-#define AMDGPU_PG_SUPPORT_UVD			(1 << 3)
-#define AMDGPU_PG_SUPPORT_VCE			(1 << 4)
-#define AMDGPU_PG_SUPPORT_CP			(1 << 5)
-#define AMDGPU_PG_SUPPORT_GDS			(1 << 6)
-#define AMDGPU_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
-#define AMDGPU_PG_SUPPORT_SDMA			(1 << 8)
-#define AMDGPU_PG_SUPPORT_ACP			(1 << 9)
-#define AMDGPU_PG_SUPPORT_SAMU			(1 << 10)
-
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE			0x00000000L
 #define AMDGPU_GFX_SAFE_MODE			0x00000001L
@@ -606,8 +567,6 @@
 	uint32_t		align;
 };
 
-struct amdgpu_sa_bo;
-
 /* sub-allocation buffer */
 struct amdgpu_sa_bo {
 	struct list_head		olist;
@@ -2360,6 +2319,8 @@
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 				     uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+				  unsigned long end);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0e13763..362bedc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -154,7 +154,7 @@
 	.get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
 {
 	return (struct kfd2kgd_calls *)&kfd2kgd;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 79fa5c7..04b744d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -115,7 +115,7 @@
 	.get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
 {
 	return (struct kfd2kgd_calls *)&kfd2kgd;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a081dda..7a4b101 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -795,6 +795,12 @@
 	case CGS_SYSTEM_INFO_PCIE_MLW:
 		sys_info->value = adev->pm.pcie_mlw_mask;
 		break;
+	case CGS_SYSTEM_INFO_CG_FLAGS:
+		sys_info->value = adev->cg_flags;
+		break;
+	case CGS_SYSTEM_INFO_PG_FLAGS:
+		sys_info->value = adev->pg_flags;
+		break;
 	default:
 		return -ENODEV;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6553146..51bfc11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1795,15 +1795,20 @@
 	}
 
 	/* post card */
-	amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	if (!amdgpu_card_posted(adev))
+		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
 	r = amdgpu_resume(adev);
+	if (r)
+		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
 
 	amdgpu_fence_driver_resume(adev);
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
+	if (resume) {
+		r = amdgpu_ib_ring_tests(adev);
+		if (r)
+			DRM_ERROR("ib ring test failed (%d).\n", r);
+	}
 
 	r = amdgpu_late_init(adev);
 	if (r)
@@ -1933,80 +1938,97 @@
 	return r;
 }
 
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007  /* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
+
 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
 {
 	u32 mask;
 	int ret;
 
-	if (pci_is_root_bus(adev->pdev->bus))
+	if (amdgpu_pcie_gen_cap)
+		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
+
+	if (amdgpu_pcie_lane_cap)
+		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
+
+	/* covers APUs as well */
+	if (pci_is_root_bus(adev->pdev->bus)) {
+		if (adev->pm.pcie_gen_mask == 0)
+			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		if (adev->pm.pcie_mlw_mask == 0)
+			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		return;
-
-	if (amdgpu_pcie_gen2 == 0)
-		return;
-
-	if (adev->flags & AMD_IS_APU)
-		return;
-
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (!ret) {
-		adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
-					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
-					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
-		if (mask & DRM_PCIE_SPEED_25)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
-		if (mask & DRM_PCIE_SPEED_50)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
-		if (mask & DRM_PCIE_SPEED_80)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
 	}
-	ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
-	if (!ret) {
-		switch (mask) {
-		case 32:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 16:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 12:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 8:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 4:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 2:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 1:
-			adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
-			break;
-		default:
-			break;
+
+	if (adev->pm.pcie_gen_mask == 0) {
+		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+		if (!ret) {
+			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+
+			if (mask & DRM_PCIE_SPEED_25)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+			if (mask & DRM_PCIE_SPEED_50)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
+			if (mask & DRM_PCIE_SPEED_80)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+		} else {
+			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		}
+	}
+	if (adev->pm.pcie_mlw_mask == 0) {
+		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
+		if (!ret) {
+			switch (mask) {
+			case 32:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 16:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 12:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 8:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 4:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 2:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 1:
+				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+				break;
+			default:
+				break;
+			}
+		} else {
+			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b5dbbb5..9ef1db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -83,6 +83,8 @@
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_enable_semaphores = 0;
 int amdgpu_powerplay = -1;
+unsigned amdgpu_pcie_gen_cap = 0;
+unsigned amdgpu_pcie_lane_cap = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -170,6 +172,12 @@
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
 #endif
 
+MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
+module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+
+MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
+module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
@@ -256,11 +264,11 @@
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
-	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
 	/* tonga */
 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index b1969f2..d4e2780 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -142,7 +142,8 @@
 
 		list_for_each_entry(bo, &node->bos, mn_list) {
 
-			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
+							  end))
 				continue;
 
 			r = amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a2a16ac..b8fbbd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
@@ -261,6 +262,13 @@
 				       AMDGPU_GEM_DOMAIN_OA);
 
 	bo->flags = flags;
+
+	/* For architectures that don't support WC memory,
+	 * mask out the WC flag from the BO
+	 */
+	if (!drm_arch_can_wc_memory())
+		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
 	amdgpu_fill_placement_to_bo(bo, placement);
 	/* Kernel allocation are uninterruptible */
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8b88edb..ca72a2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -354,12 +354,15 @@
 
 		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
 			if (fences[i])
-				fences[count++] = fences[i];
+				fences[count++] = fence_get(fences[i]);
 
 		if (count) {
 			spin_unlock(&sa_manager->wq.lock);
 			t = fence_wait_any_timeout(fences, count, false,
 						   MAX_SCHEDULE_TIMEOUT);
+			for (i = 0; i < count; ++i)
+				fence_put(fences[i]);
+
 			r = (t > 0) ? 0 : t;
 			spin_lock(&sa_manager->wq.lock);
 		} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8a1752f..1cbb16e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -712,7 +712,7 @@
 						       0, PAGE_SIZE,
 						       PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
-			while (--i) {
+			while (i--) {
 				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 				gtt->ttm.dma_address[i] = 0;
@@ -783,6 +783,25 @@
 	return !!gtt->userptr;
 }
 
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+				  unsigned long end)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned long size;
+
+	if (gtt == NULL)
+		return false;
+
+	if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+		return false;
+
+	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+	if (gtt->userptr > end || gtt->userptr + size <= start)
+		return false;
+
+	return true;
+}
+
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -808,7 +827,7 @@
 			flags |= AMDGPU_PTE_SNOOPED;
 	}
 
-	if (adev->asic_type >= CHIP_TOPAZ)
+	if (adev->asic_type >= CHIP_TONGA)
 		flags |= AMDGPU_PTE_EXECUTABLE;
 
 	flags |= AMDGPU_PTE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 8b4731d..474ca02 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -31,6 +31,7 @@
 #include "ci_dpm.h"
 #include "gfx_v7_0.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include <linux/seq_file.h>
 
 #include "smu/smu_7_0_1_d.h"
@@ -5835,18 +5836,16 @@
 	u8 frev, crev;
 	struct ci_power_info *pi;
 	int ret;
-	u32 mask;
 
 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
 	if (pi == NULL)
 		return -ENOMEM;
 	adev->pm.dpm.priv = pi;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		pi->sys_pcie_mask = 0;
-	else
-		pi->sys_pcie_mask = mask;
+	pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+
 	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 
 	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index fd9c958..155965e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1762,6 +1762,9 @@
 	if (amdgpu_aspm == 0)
 		return;
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	/* XXX double check APUs */
 	if (adev->flags & AMD_IS_APU)
 		return;
@@ -2332,72 +2335,72 @@
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_MC_LS |
-			AMDGPU_CG_SUPPORT_MC_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_HAWAII:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_MC_LS |
-			AMDGPU_CG_SUPPORT_MC_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = 0x28;
 		break;
 	case CHIP_KAVERI:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags =
-			/*AMDGPU_PG_SUPPORT_GFX_PG |
-			  AMDGPU_PG_SUPPORT_GFX_SMG |
-			  AMDGPU_PG_SUPPORT_GFX_DMG |*/
-			AMDGPU_PG_SUPPORT_UVD |
-			/*AMDGPU_PG_SUPPORT_VCE |
-			  AMDGPU_PG_SUPPORT_CP |
-			  AMDGPU_PG_SUPPORT_GDS |
-			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
-			  AMDGPU_PG_SUPPORT_ACP |
-			  AMDGPU_PG_SUPPORT_SAMU |*/
+			/*AMD_PG_SUPPORT_GFX_PG |
+			  AMD_PG_SUPPORT_GFX_SMG |
+			  AMD_PG_SUPPORT_GFX_DMG |*/
+			AMD_PG_SUPPORT_UVD |
+			/*AMD_PG_SUPPORT_VCE |
+			  AMD_PG_SUPPORT_CP |
+			  AMD_PG_SUPPORT_GDS |
+			  AMD_PG_SUPPORT_RLC_SMU_HS |
+			  AMD_PG_SUPPORT_ACP |
+			  AMD_PG_SUPPORT_SAMU |*/
 			0;
 		if (adev->pdev->device == 0x1312 ||
 			adev->pdev->device == 0x1316 ||
@@ -2409,29 +2412,29 @@
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags =
-			/*AMDGPU_PG_SUPPORT_GFX_PG |
-			  AMDGPU_PG_SUPPORT_GFX_SMG | */
-			AMDGPU_PG_SUPPORT_UVD |
-			/*AMDGPU_PG_SUPPORT_VCE |
-			  AMDGPU_PG_SUPPORT_CP |
-			  AMDGPU_PG_SUPPORT_GDS |
-			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
-			  AMDGPU_PG_SUPPORT_SAMU |*/
+			/*AMD_PG_SUPPORT_GFX_PG |
+			  AMD_PG_SUPPORT_GFX_SMG | */
+			AMD_PG_SUPPORT_UVD |
+			/*AMD_PG_SUPPORT_VCE |
+			  AMD_PG_SUPPORT_CP |
+			  AMD_PG_SUPPORT_GDS |
+			  AMD_PG_SUPPORT_RLC_SMU_HS |
+			  AMD_PG_SUPPORT_SAMU |*/
 			0;
 		if (adev->asic_type == CHIP_KABINI) {
 			if (adev->rev_id == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5f712ce..c55ecf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -885,7 +885,7 @@
 {
 	u32 orig, data;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
 		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
 		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
 	} else {
@@ -906,7 +906,7 @@
 {
 	u32 orig, data;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
 		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 		data |= 0x100;
 		if (orig != data)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 4dd17f2..9056355 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -445,13 +445,13 @@
 	pi->gfx_pg_threshold = 500;
 	pi->caps_fps = true;
 	/* uvd */
-	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
 	pi->caps_uvd_dpm = true;
 	/* vce */
-	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
+	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
 	pi->caps_vce_dpm = true;
 	/* acp */
-	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
 	pi->caps_acp_dpm = true;
 
 	pi->caps_stable_power_state = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 72793f9..7732059 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4109,7 +4109,7 @@
 
 	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
 
 		tmp = gfx_v7_0_halt_rlc(adev);
@@ -4147,9 +4147,9 @@
 {
 	u32 data, orig, tmp = 0;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
-			if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
 				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
 				if (orig != data)
@@ -4176,14 +4176,14 @@
 
 		gfx_v7_0_update_rlc(adev, tmp);
 
-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
 			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
 			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
 			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
 			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
 			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
-			if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
-			    (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
+			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
+			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
 				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
 			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
 			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
@@ -4249,7 +4249,7 @@
 	u32 data, orig;
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
 	else
 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
@@ -4263,7 +4263,7 @@
 	u32 data, orig;
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
 	else
 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
@@ -4276,7 +4276,7 @@
 	u32 data, orig;
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
 		data &= ~0x8000;
 	else
 		data |= 0x8000;
@@ -4289,7 +4289,7 @@
 	u32 data, orig;
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
 		data &= ~0x2000;
 	else
 		data |= 0x2000;
@@ -4370,7 +4370,7 @@
 {
 	u32 data, orig;
 
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) {
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
 		orig = data = RREG32(mmRLC_PG_CNTL);
 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
 		if (orig != data)
@@ -4442,7 +4442,7 @@
 	u32 data, orig;
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
 		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
 	else
 		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
@@ -4456,7 +4456,7 @@
 	u32 data, orig;
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
 		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
 	else
 		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
@@ -4623,15 +4623,15 @@
 
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
 {
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
 		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
 			gfx_v7_0_init_gfx_cgpg(adev);
 			gfx_v7_0_enable_cp_pg(adev, true);
 			gfx_v7_0_enable_gds_pg(adev, true);
@@ -4643,14 +4643,14 @@
 
 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
 {
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v7_0_update_gfx_pg(adev, false);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
 			gfx_v7_0_enable_cp_pg(adev, false);
 			gfx_v7_0_enable_gds_pg(adev, false);
 		}
@@ -4738,6 +4738,22 @@
 	return 0;
 }
 
+static int gfx_v7_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+	if (r)
+		return r;
+
+	return 0;
+}
+
 static int gfx_v7_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
@@ -4890,6 +4906,8 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 	gfx_v7_0_cp_enable(adev, false);
 	gfx_v7_0_rlc_stop(adev);
 	gfx_v7_0_fini_pg(adev);
@@ -5509,14 +5527,14 @@
 	if (state == AMD_PG_STATE_GATE)
 		gate = true;
 
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v7_0_update_gfx_pg(adev, gate);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
 			gfx_v7_0_enable_cp_pg(adev, gate);
 			gfx_v7_0_enable_gds_pg(adev, gate);
 		}
@@ -5527,7 +5545,7 @@
 
 const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 	.early_init = gfx_v7_0_early_init,
-	.late_init = NULL,
+	.late_init = gfx_v7_0_late_init,
 	.sw_init = gfx_v7_0_sw_init,
 	.sw_fini = gfx_v7_0_sw_fini,
 	.hw_init = gfx_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 95c0cdf..8f8ec37 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -111,7 +111,6 @@
 MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -828,7 +827,8 @@
 	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-	if (adev->asic_type != CHIP_STONEY) {
+	if ((adev->asic_type != CHIP_STONEY) &&
+	    (adev->asic_type != CHIP_TOPAZ)) {
 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
 		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
 		if (!err) {
@@ -3851,10 +3851,16 @@
 			if (r)
 				return -EINVAL;
 
-			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-							AMDGPU_UCODE_ID_CP_MEC1);
-			if (r)
-				return -EINVAL;
+			if (adev->asic_type == CHIP_TOPAZ) {
+				r = gfx_v8_0_cp_compute_load_microcode(adev);
+				if (r)
+					return r;
+			} else {
+				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+										 AMDGPU_UCODE_ID_CP_MEC1);
+				if (r)
+					return -EINVAL;
+			}
 		}
 	}
 
@@ -3901,6 +3907,8 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 	gfx_v8_0_cp_enable(adev, false);
 	gfx_v8_0_rlc_stop(adev);
 	gfx_v8_0_cp_compute_fini(adev);
@@ -4329,6 +4337,14 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
+	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+	if (r)
+		return r;
+
 	/* requires IBs so do in late init after IB pool is initialized */
 	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 3f95606..b806079 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -42,9 +42,39 @@
 
 MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+static const u32 golden_settings_iceland_a11[] =
+{
+	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_TOPAZ:
+		amdgpu_program_register_sequence(adev,
+						 iceland_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_iceland_a11,
+						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+		break;
+	default:
+		break;
+	}
+}
 
 /**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
  *
  * @adev: amdgpu_device pointer
  *
@@ -132,13 +162,20 @@
 	case CHIP_HAWAII:
 		chip_name = "hawaii";
 		break;
+	case CHIP_TOPAZ:
+		chip_name = "topaz";
+		break;
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 		return 0;
 	default: BUG();
 	}
 
-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	if (adev->asic_type == CHIP_TOPAZ)
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+	else
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
 	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
@@ -755,7 +792,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 		orig = data = RREG32(mc_cg_registers[i]);
-		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
 			data |= mc_cg_ls_en[i];
 		else
 			data &= ~mc_cg_ls_en[i];
@@ -772,7 +809,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 		orig = data = RREG32(mc_cg_registers[i]);
-		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
 			data |= mc_cg_en[i];
 		else
 			data &= ~mc_cg_en[i];
@@ -788,7 +825,7 @@
 
 	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
 		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -811,7 +848,7 @@
 
 	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 	else
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -827,7 +864,7 @@
 
 	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 	else
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -984,6 +1021,8 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	gmc_v7_0_init_golden_registers(adev);
+
 	gmc_v7_0_mc_program(adev);
 
 	if (!(adev->flags & AMD_IS_APU)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c0c9a01..3efd455 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -42,9 +42,7 @@
 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -75,19 +73,6 @@
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_iceland_a11[] =
-{
-	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
-	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
 static const u32 cz_mgcg_cgcg_init[] =
 {
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
-	case CHIP_TOPAZ:
-		amdgpu_program_register_sequence(adev,
-						 iceland_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
-		amdgpu_program_register_sequence(adev,
-						 golden_settings_iceland_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
-		break;
 	case CHIP_FIJI:
 		amdgpu_program_register_sequence(adev,
 						 fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@
 	DRM_DEBUG("\n");
 
 	switch (adev->asic_type) {
-	case CHIP_TOPAZ:
-		chip_name = "topaz";
-		break;
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
 	case CHIP_FIJI:
-		chip_name = "fiji";
-		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		return 0;
@@ -1007,7 +979,7 @@
 
 	gmc_v8_0_mc_program(adev);
 
-	if (!(adev->flags & AMD_IS_APU)) {
+	if (adev->asic_type == CHIP_TONGA) {
 		r = gmc_v8_0_mc_load_microcode(adev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 966d4b2..090486c 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -432,7 +432,7 @@
 		case AMDGPU_UCODE_ID_CP_ME:
 			return UCODE_ID_CP_ME_MASK;
 		case AMDGPU_UCODE_ID_CP_MEC1:
-			return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+			return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
 		case AMDGPU_UCODE_ID_CP_MEC2:
 			return UCODE_ID_CP_MEC_MASK;
 		case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@
 		return -EINVAL;
 	}
 
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-		return -EINVAL;
-	}
-
 	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
 			&toc->entry[toc->num_entries++])) {
 		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@
 			UCODE_ID_CP_ME_MASK |
 			UCODE_ID_CP_PFP_MASK |
 			UCODE_ID_CP_MEC_MASK |
-			UCODE_ID_CP_MEC_JT1_MASK |
-			UCODE_ID_CP_MEC_JT2_MASK;
+			UCODE_ID_CP_MEC_JT1_MASK;
+
 
 	if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
 		DRM_ERROR("Fail to request SMU load ucode\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c..654d767 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2859,11 +2859,11 @@
 	pi->voltage_drop_t = 0;
 	pi->caps_sclk_throttle_low_notification = false;
 	pi->caps_fps = false; /* true? */
-	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
 	pi->caps_uvd_dpm = true;
-	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
-	pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false;
-	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
+	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
+	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
 	pi->caps_stable_p_state = false;
 
 	ret = kv_parse_sys_info_table(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5e9f73a..fbd3767 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -611,7 +611,7 @@
 {
 	u32 orig, data;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
 		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
 		data = 0xfff;
 		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
@@ -830,6 +830,9 @@
 	bool gate = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 
@@ -848,7 +851,10 @@
 	 * revisit this when there is a cleaner line between
 	 * the smc and the hw blocks
 	 */
-	 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
 
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 38864f56..57f1c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -774,6 +774,11 @@
 static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	return 0;
 }
 
@@ -789,6 +794,9 @@
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v5_0_stop(adev);
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3d59139..0b365b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -532,7 +532,7 @@
 	uvd_v6_0_mc_resume(adev);
 
 	/* Set dynamic clock gating in S/W control mode */
-	if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) {
+	if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
 		if (adev->flags & AMD_IS_APU)
 			cz_set_uvd_clock_gating_branches(adev, false);
 		else
@@ -1000,7 +1000,7 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-	if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
 	if (enable) {
@@ -1030,6 +1030,9 @@
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v6_0_stop(adev);
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 52ac7a8..a822eda 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -373,7 +373,7 @@
 {
 	bool sw_cg = false;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
 		if (sw_cg)
 			vce_v2_0_set_sw_cg(adev, true);
 		else
@@ -608,6 +608,9 @@
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE)
 		/* XXX do we need a vce_v2_0_stop()? */
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index e99af81..d662fa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -277,7 +277,7 @@
 		WREG32_P(mmVCE_STATUS, 0, ~1);
 
 		/* Set Clock-Gating off */
-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)
+		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
 			vce_v3_0_set_vce_sw_clock_gating(adev, false);
 
 		if (r) {
@@ -676,7 +676,7 @@
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	int i;
 
-	if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
 		return 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -728,6 +728,9 @@
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE)
 		/* XXX do we need a vce_v3_0_stop()? */
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 652e766..0d14d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -61,6 +61,7 @@
 #include "vi.h"
 #include "vi_dpm.h"
 #include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
 #include "gfx_v8_0.h"
 #include "sdma_v2_4.h"
 #include "sdma_v3_0.h"
@@ -1109,10 +1110,10 @@
 	},
 	{
 		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
+		.major = 7,
+		.minor = 4,
 		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
+		.funcs = &gmc_v7_0_ip_funcs,
 	},
 	{
 		.type = AMD_IP_BLOCK_TYPE_IH,
@@ -1442,8 +1443,7 @@
 		break;
 	case CHIP_FIJI:
 		adev->has_uvd = true;
-		adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
-				AMDGPU_CG_SUPPORT_VCE_MGCG;
+		adev->cg_flags = 0;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x3c;
 		break;
@@ -1457,8 +1457,7 @@
 	case CHIP_STONEY:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
-		/* Disable UVD pg */
-		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
+		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 9be0070..a902ae0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -194,7 +194,7 @@
 
 	kfree(p);
 
-	kfree((void *)work);
+	kfree(work);
 }
 
 static void kfd_process_destroy_delayed(struct rcu_head *rcu)
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 1195d06f..dbf7e64 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -85,6 +85,38 @@
 	AMD_PG_STATE_UNGATE,
 };
 
+/* CG flags */
+#define AMD_CG_SUPPORT_GFX_MGCG			(1 << 0)
+#define AMD_CG_SUPPORT_GFX_MGLS			(1 << 1)
+#define AMD_CG_SUPPORT_GFX_CGCG			(1 << 2)
+#define AMD_CG_SUPPORT_GFX_CGLS			(1 << 3)
+#define AMD_CG_SUPPORT_GFX_CGTS			(1 << 4)
+#define AMD_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
+#define AMD_CG_SUPPORT_GFX_CP_LS		(1 << 6)
+#define AMD_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
+#define AMD_CG_SUPPORT_MC_LS			(1 << 8)
+#define AMD_CG_SUPPORT_MC_MGCG			(1 << 9)
+#define AMD_CG_SUPPORT_SDMA_LS			(1 << 10)
+#define AMD_CG_SUPPORT_SDMA_MGCG		(1 << 11)
+#define AMD_CG_SUPPORT_BIF_LS			(1 << 12)
+#define AMD_CG_SUPPORT_UVD_MGCG			(1 << 13)
+#define AMD_CG_SUPPORT_VCE_MGCG			(1 << 14)
+#define AMD_CG_SUPPORT_HDP_LS			(1 << 15)
+#define AMD_CG_SUPPORT_HDP_MGCG			(1 << 16)
+
+/* PG flags */
+#define AMD_PG_SUPPORT_GFX_PG			(1 << 0)
+#define AMD_PG_SUPPORT_GFX_SMG			(1 << 1)
+#define AMD_PG_SUPPORT_GFX_DMG			(1 << 2)
+#define AMD_PG_SUPPORT_UVD			(1 << 3)
+#define AMD_PG_SUPPORT_VCE			(1 << 4)
+#define AMD_PG_SUPPORT_CP			(1 << 5)
+#define AMD_PG_SUPPORT_GDS			(1 << 6)
+#define AMD_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
+#define AMD_PG_SUPPORT_SDMA			(1 << 8)
+#define AMD_PG_SUPPORT_ACP			(1 << 9)
+#define AMD_PG_SUPPORT_SAMU			(1 << 10)
+
 enum amd_pm_state_type {
 	/* not used for dpm */
 	POWER_STATE_TYPE_DEFAULT,
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 713aec9..aec38fc 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -109,6 +109,8 @@
 	CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
 	CGS_SYSTEM_INFO_PCIE_GEN_INFO,
 	CGS_SYSTEM_INFO_PCIE_MLW,
+	CGS_SYSTEM_INFO_CG_FLAGS,
+	CGS_SYSTEM_INFO_PG_FLAGS,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0874ab4..cf01177 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -174,6 +174,8 @@
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 	uint32_t i;
+	struct cgs_system_info sys_info = {0};
+	int result;
 
 	cz_hwmgr->gfx_ramp_step = 256*25/100;
 
@@ -247,6 +249,22 @@
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 				   PHM_PlatformCaps_DisableVoltageIsland);
 
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_VCEPowerGating);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 44a9250..980d3bf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -4451,6 +4451,7 @@
 	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
 	phw_tonga_ulv_parm *ulv;
+	struct cgs_system_info sys_info = {0};
 
 	PP_ASSERT_WITH_CODE((NULL != hwmgr),
 		"Invalid Parameter!", return -1;);
@@ -4615,9 +4616,23 @@
 
 	data->vddc_phase_shed_control = 0;
 
-	if (0 == result) {
-		struct cgs_system_info sys_info = {0};
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_VCEPowerGating);
+	}
 
+	if (0 == result) {
 		data->is_tlu_enabled = 0;
 		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
 			TONGA_MAX_HARDWARE_POWERLEVELS;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3f74193..9a7b446 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -65,8 +65,6 @@
 	 */
 	state->allow_modeset = true;
 
-	state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
-
 	state->crtcs = kcalloc(dev->mode_config.num_crtc,
 			       sizeof(*state->crtcs), GFP_KERNEL);
 	if (!state->crtcs)
@@ -83,16 +81,6 @@
 				      sizeof(*state->plane_states), GFP_KERNEL);
 	if (!state->plane_states)
 		goto fail;
-	state->connectors = kcalloc(state->num_connector,
-				    sizeof(*state->connectors),
-				    GFP_KERNEL);
-	if (!state->connectors)
-		goto fail;
-	state->connector_states = kcalloc(state->num_connector,
-					  sizeof(*state->connector_states),
-					  GFP_KERNEL);
-	if (!state->connector_states)
-		goto fail;
 
 	state->dev = dev;
 
@@ -823,19 +811,27 @@
 
 	index = drm_connector_index(connector);
 
-	/*
-	 * Construction of atomic state updates can race with a connector
-	 * hot-add which might overflow. In this case flip the table and just
-	 * restart the entire ioctl - no one is fast enough to livelock a cpu
-	 * with physical hotplug events anyway.
-	 *
-	 * Note that we only grab the indexes once we have the right lock to
-	 * prevent hotplug/unplugging of connectors. So removal is no problem,
-	 * at most the array is a bit too large.
-	 */
 	if (index >= state->num_connector) {
-		DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
-		return ERR_PTR(-EAGAIN);
+		struct drm_connector **c;
+		struct drm_connector_state **cs;
+		int alloc = max(index + 1, config->num_connector);
+
+		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
+		if (!c)
+			return ERR_PTR(-ENOMEM);
+
+		state->connectors = c;
+		memset(&state->connectors[state->num_connector], 0,
+		       sizeof(*state->connectors) * (alloc - state->num_connector));
+
+		cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
+		if (!cs)
+			return ERR_PTR(-ENOMEM);
+
+		state->connector_states = cs;
+		memset(&state->connector_states[state->num_connector], 0,
+		       sizeof(*state->connector_states) * (alloc - state->num_connector));
+		state->num_connector = alloc;
 	}
 
 	if (state->connector_states[index])
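
A note on the pattern above (illustrative sketch only, names are made up): krealloc() keeps the existing entries but leaves the newly added tail uninitialized, so the memset() of the fresh slots is what lets a NULL entry mean "no state allocated yet". On failure the old array is untouched, which is why the error path can return without freeing state->connectors.

/* Generic grow-on-demand sketch of the krealloc + memset idiom (assumes want > *len). */
static int example_grow_ptr_array(void ***arr, int *len, int want)
{
	void **tmp;

	tmp = krealloc(*arr, want * sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;	/* *arr is still valid and owned by the caller */

	memset(&tmp[*len], 0, (want - *len) * sizeof(*tmp));
	*arr = tmp;
	*len = want;
	return 0;
}
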
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7c52306..4f2d3e1 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1493,7 +1493,7 @@
 {
 	int i;
 
-	for (i = 0; i < dev->mode_config.num_connector; i++) {
+	for (i = 0; i < state->num_connector; i++) {
 		struct drm_connector *connector = state->connectors[i];
 
 		if (!connector)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d40bab2..f619121 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -918,12 +918,19 @@
 	connector->base.properties = &connector->properties;
 	connector->dev = dev;
 	connector->funcs = funcs;
+
+	connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+	if (connector->connector_id < 0) {
+		ret = connector->connector_id;
+		goto out_put;
+	}
+
 	connector->connector_type = connector_type;
 	connector->connector_type_id =
 		ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
 	if (connector->connector_type_id < 0) {
 		ret = connector->connector_type_id;
-		goto out_put;
+		goto out_put_id;
 	}
 	connector->name =
 		kasprintf(GFP_KERNEL, "%s-%d",
@@ -931,7 +938,7 @@
 			  connector->connector_type_id);
 	if (!connector->name) {
 		ret = -ENOMEM;
-		goto out_put;
+		goto out_put_type_id;
 	}
 
 	INIT_LIST_HEAD(&connector->probed_modes);
@@ -959,7 +966,12 @@
 	}
 
 	connector->debugfs_entry = NULL;
-
+out_put_type_id:
+	if (ret)
+		ida_remove(connector_ida, connector->connector_type_id);
+out_put_id:
+	if (ret)
+		ida_remove(&config->connector_ida, connector->connector_id);
 out_put:
 	if (ret)
 		drm_mode_object_put(dev, &connector->base);
@@ -996,6 +1008,9 @@
 	ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
 		   connector->connector_type_id);
 
+	ida_remove(&dev->mode_config.connector_ida,
+		   connector->connector_id);
+
 	kfree(connector->display_info.bus_formats);
 	drm_mode_object_put(dev, &connector->base);
 	kfree(connector->name);
@@ -1013,32 +1028,6 @@
 EXPORT_SYMBOL(drm_connector_cleanup);
 
 /**
- * drm_connector_index - find the index of a registered connector
- * @connector: connector to find index for
- *
- * Given a registered connector, return the index of that connector within a DRM
- * device's list of connectors.
- */
-unsigned int drm_connector_index(struct drm_connector *connector)
-{
-	unsigned int index = 0;
-	struct drm_connector *tmp;
-	struct drm_mode_config *config = &connector->dev->mode_config;
-
-	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
-	drm_for_each_connector(tmp, connector->dev) {
-		if (tmp == connector)
-			return index;
-
-		index++;
-	}
-
-	BUG();
-}
-EXPORT_SYMBOL(drm_connector_index);
-
-/**
  * drm_connector_register - register a connector
  * @connector: the connector to register
  *
@@ -5789,6 +5778,7 @@
 	INIT_LIST_HEAD(&dev->mode_config.plane_list);
 	idr_init(&dev->mode_config.crtc_idr);
 	idr_init(&dev->mode_config.tile_idr);
+	ida_init(&dev->mode_config.connector_ida);
 
 	drm_modeset_lock_all(dev);
 	drm_mode_create_standard_properties(dev);
@@ -5869,6 +5859,7 @@
 		crtc->funcs->destroy(crtc);
 	}
 
+	ida_destroy(&dev->mode_config.connector_ida);
 	idr_destroy(&dev->mode_config.tile_idr);
 	idr_destroy(&dev->mode_config.crtc_idr);
 	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
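
For readers unfamiliar with the IDA API introduced above, a minimal sketch of the allocate/release lifecycle (illustrative only; the connector code keeps the returned id in connector->connector_id and releases it in drm_connector_cleanup()):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* Returns the smallest free id >= 0, or a negative errno. */
	return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

static void example_put_id(int id)
{
	/* Every successful ida_simple_get() must eventually be undone. */
	ida_remove(&example_ida, id);
}
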
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6ed90a2..27fbd79 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -803,6 +803,18 @@
 	return mstb;
 }
 
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+	if (mstb->port_parent) {
+		if (list_empty(&mstb->port_parent->next))
+			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+	}
+	kfree(mstb);
+}
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
@@ -810,6 +822,15 @@
 	bool wake_tx = false;
 
 	/*
+	 * init kref again to be used by ports to remove mst branch when it is
+	 * not needed anymore
+	 */
+	kref_init(kref);
+
+	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+		kref_get(&mstb->port_parent->kref);
+
+	/*
 	 * destroy all ports - don't need lock
 	 * as there are no more references to the mst branch
 	 * device at this point.
@@ -835,7 +856,8 @@
 
 	if (wake_tx)
 		wake_up(&mstb->mgr->tx_waitq);
-	kfree(mstb);
+
+	kref_put(kref, drm_dp_free_mst_branch_device);
 }
 
 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -883,6 +905,7 @@
 			 * from an EDID retrieval */
 
 			mutex_lock(&mgr->destroy_connector_lock);
+			kref_get(&port->parent->kref);
 			list_add(&port->next, &mgr->destroy_connector_list);
 			mutex_unlock(&mgr->destroy_connector_lock);
 			schedule_work(&mgr->destroy_connector_work);
@@ -1018,18 +1041,27 @@
 	return send_link;
 }
 
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
-				   struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
 {
 	int ret;
-	if (port->dpcd_rev >= 0x12) {
-		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
-		if (!port->guid_valid) {
-			ret = drm_dp_send_dpcd_write(mstb->mgr,
-						     port,
-						     DP_GUID,
-						     16, port->guid);
-			port->guid_valid = true;
+
+	memcpy(mstb->guid, guid, 16);
+
+	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+		if (mstb->port_parent) {
+			ret = drm_dp_send_dpcd_write(
+					mstb->mgr,
+					mstb->port_parent,
+					DP_GUID,
+					16,
+					mstb->guid);
+		} else {
+
+			ret = drm_dp_dpcd_write(
+					mstb->mgr->aux,
+					DP_GUID,
+					mstb->guid,
+					16);
 		}
 	}
 }
@@ -1086,7 +1118,6 @@
 	port->dpcd_rev = port_msg->dpcd_revision;
 	port->num_sdp_streams = port_msg->num_sdp_streams;
 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
-	memcpy(port->guid, port_msg->peer_guid, 16);
 
 	/* manage mstb port lists with mgr lock - take a reference
 	   for this list */
@@ -1099,11 +1130,9 @@
 
 	if (old_ddps != port->ddps) {
 		if (port->ddps) {
-			drm_dp_check_port_guid(mstb, port);
 			if (!port->input)
 				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
 		} else {
-			port->guid_valid = false;
 			port->available_pbn = 0;
 			}
 	}
@@ -1162,10 +1191,8 @@
 
 	if (old_ddps != port->ddps) {
 		if (port->ddps) {
-			drm_dp_check_port_guid(mstb, port);
 			dowork = true;
 		} else {
-			port->guid_valid = false;
 			port->available_pbn = 0;
 		}
 	}
@@ -1222,13 +1249,14 @@
 	struct drm_dp_mst_branch *found_mstb;
 	struct drm_dp_mst_port *port;
 
+	if (memcmp(mstb->guid, guid, 16) == 0)
+		return mstb;
+
+
 	list_for_each_entry(port, &mstb->ports, next) {
 		if (!port->mstb)
 			continue;
 
-		if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
-			return port->mstb;
-
 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
 
 		if (found_mstb)
@@ -1247,10 +1275,7 @@
 	/* find the port by iterating down */
 	mutex_lock(&mgr->lock);
 
-	if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
-		mstb = mgr->mst_primary;
-	else
-		mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
 
 	if (mstb)
 		kref_get(&mstb->kref);
@@ -1555,6 +1580,9 @@
 				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
 				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
 			}
+
+			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
 				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
 			}
@@ -1602,6 +1630,37 @@
 	return 0;
 }
 
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+	if (!mstb->port_parent)
+		return NULL;
+
+	if (mstb->port_parent->mstb != mstb)
+		return mstb->port_parent;
+
+	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+									 struct drm_dp_mst_branch *mstb,
+									 int *port_num)
+{
+	struct drm_dp_mst_branch *rmstb = NULL;
+	struct drm_dp_mst_port *found_port;
+	mutex_lock(&mgr->lock);
+	if (mgr->mst_primary) {
+		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+		if (found_port) {
+			rmstb = found_port->parent;
+			kref_get(&rmstb->kref);
+			*port_num = found_port->port_num;
+		}
+	}
+	mutex_unlock(&mgr->lock);
+	return rmstb;
+}
+
 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 				   struct drm_dp_mst_port *port,
 				   int id,
@@ -1609,13 +1668,18 @@
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
 	struct drm_dp_mst_branch *mstb;
-	int len, ret;
+	int len, ret, port_num;
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
+	port_num = port->port_num;
 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
-	if (!mstb)
-		return -EINVAL;
+	if (!mstb) {
+		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+		if (!mstb)
+			return -EINVAL;
+	}
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 	if (!txmsg) {
@@ -1627,7 +1691,7 @@
 		sinks[i] = i;
 
 	txmsg->dst = mstb;
-	len = build_allocate_payload(txmsg, port->port_num,
+	len = build_allocate_payload(txmsg, port_num,
 				     id,
 				     pbn, port->num_sdp_streams, sinks);
 
@@ -1983,6 +2047,12 @@
 		mgr->mst_primary = mstb;
 		kref_get(&mgr->mst_primary->kref);
 
+		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+		if (ret < 0) {
+			goto out_unlock;
+		}
+
 		{
 			struct drm_dp_payload reset_pay;
 			reset_pay.start_slot = 0;
@@ -1990,26 +2060,6 @@
 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
 		}
 
-		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
-		if (ret < 0) {
-			goto out_unlock;
-		}
-
-
-		/* sort out guid */
-		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
-		if (ret != 16) {
-			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
-			goto out_unlock;
-		}
-
-		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
-		if (!mgr->guid_valid) {
-			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
-			mgr->guid_valid = true;
-		}
-
 		queue_work(system_long_wq, &mgr->work);
 
 		ret = 0;
@@ -2231,6 +2281,7 @@
 			}
 
 			drm_dp_update_port(mstb, &msg.u.conn_stat);
+
 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
 			(*mgr->cbs->hotplug)(mgr);
 
@@ -2446,6 +2497,7 @@
 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
 		if (pbn == port->vcpi.pbn) {
 			*slots = port->vcpi.num_slots;
+			drm_dp_put_port(port);
 			return true;
 		}
 	}
@@ -2605,32 +2657,31 @@
  */
 int drm_dp_calc_pbn_mode(int clock, int bpp)
 {
-	fixed20_12 pix_bw;
-	fixed20_12 fbpp;
-	fixed20_12 result;
-	fixed20_12 margin, tmp;
-	u32 res;
+	u64 kbps;
+	s64 peak_kbps;
+	u32 numerator;
+	u32 denominator;
 
-	pix_bw.full = dfixed_const(clock);
-	fbpp.full = dfixed_const(bpp);
-	tmp.full = dfixed_const(8);
-	fbpp.full = dfixed_div(fbpp, tmp);
+	kbps = clock * bpp;
 
-	result.full = dfixed_mul(pix_bw, fbpp);
-	margin.full = dfixed_const(54);
-	tmp.full = dfixed_const(64);
-	margin.full = dfixed_div(margin, tmp);
-	result.full = dfixed_div(result, margin);
+	/*
+	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+	 * common multiplier to render an integer PBN for all link rate/lane
+	 * counts combinations
+	 * calculate
+	 * peak_kbps *= (1006/1000)
+	 * peak_kbps *= (64/54)
+	 * peak_kbps /= 8    convert to bytes
+	 */
 
-	margin.full = dfixed_const(1006);
-	tmp.full = dfixed_const(1000);
-	margin.full = dfixed_div(margin, tmp);
-	result.full = dfixed_mul(result, margin);
+	numerator = 64 * 1006;
+	denominator = 54 * 8 * 1000 * 1000;
 
-	result.full = dfixed_div(result, tmp);
-	result.full = dfixed_ceil(result);
-	res = dfixed_trunc(result);
-	return res;
+	kbps *= numerator;
+	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+	return drm_fixp2int_ceil(peak_kbps);
 }
 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
 
@@ -2638,11 +2689,23 @@
 {
 	int ret;
 	ret = drm_dp_calc_pbn_mode(154000, 30);
-	if (ret != 689)
+	if (ret != 689) {
+		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+				154000, 30, 689, ret);
 		return -EINVAL;
+	}
 	ret = drm_dp_calc_pbn_mode(234000, 30);
-	if (ret != 1047)
+	if (ret != 1047) {
+		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+				234000, 30, 1047, ret);
 		return -EINVAL;
+	}
+	ret = drm_dp_calc_pbn_mode(297000, 24);
+	if (ret != 1063) {
+		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+				297000, 24, 1063, ret);
+		return -EINVAL;
+	}
 	return 0;
 }
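
A hand check of the first self-test value (not part of the patch), using the fixed-point formula implemented above, PBN = ceil(clock * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)):

/*
 * Worked example for drm_dp_calc_pbn_mode(154000, 30):
 *
 *   kbps        = 154000 * 30           = 4,620,000
 *   numerator   = 64 * 1006             = 64,384
 *   kbps * num                          = 297,454,080,000
 *   denominator = 54 * 8 * 1000 * 1000  = 432,000,000
 *   quotient    = 688.55...  ->  ceil  ->  689 PBN
 */
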
 
@@ -2783,6 +2846,13 @@
 	mutex_unlock(&mgr->qlock);
 }
 
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+	kfree(port);
+}
+
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2803,13 +2873,22 @@
 		list_del(&port->next);
 		mutex_unlock(&mgr->destroy_connector_lock);
 
+		kref_init(&port->kref);
+		INIT_LIST_HEAD(&port->next);
+
 		mgr->cbs->destroy_connector(mgr, port->connector);
 
 		drm_dp_port_teardown_pdt(port, port->pdt);
 
-		if (!port->input && port->vcpi.vcpi > 0)
-			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-		kfree(port);
+		if (!port->input && port->vcpi.vcpi > 0) {
+			if (mgr->mst_state) {
+				drm_dp_mst_reset_vcpi_slots(mgr, port);
+				drm_dp_update_payload_part1(mgr);
+				drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+			}
+		}
+
+		kref_put(&port->kref, drm_dp_free_mst_port);
 		send_hotplug = true;
 	}
 	if (send_hotplug)
@@ -2847,6 +2926,9 @@
 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
 	mgr->max_payloads = max_payloads;
 	mgr->conn_base_id = conn_base_id;
+	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
+	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
+		return -EINVAL;
 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
 	if (!mgr->payloads)
 		return -ENOMEM;
@@ -2854,7 +2936,9 @@
 	if (!mgr->proposed_vcpis)
 		return -ENOMEM;
 	set_bit(0, &mgr->payload_mask);
-	test_calc_pbn_mode();
+	if (test_calc_pbn_mode() < 0)
+		DRM_ERROR("MST PBN self-test failed\n");
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index d12a4ef..1fe1457 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -224,6 +224,64 @@
 		diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
 	}
 
+	/*
+	 * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
+	 * interval? If so then vblank irqs keep running and it will likely
+	 * happen that the hardware vblank counter is not trustworthy as it
+	 * might reset at some point in that interval and vblank timestamps
+	 * are not trustworthy either in that interval. Iow. this can result
+	 * in a bogus diff >> 1 which must be avoided as it would cause
+	 * random large forward jumps of the software vblank counter.
+	 */
+	if (diff > 1 && (vblank->inmodeset & 0x2)) {
+		DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u"
+			      " due to pre-modeset.\n", pipe, diff);
+		diff = 1;
+	}
+
+	/*
+	 * FIXME: Need to replace this hack with proper seqlocks.
+	 *
+	 * Restrict the bump of the software vblank counter to a safe maximum
+	 * value of +1 whenever there is the possibility that concurrent readers
+	 * of vblank timestamps could be active at the moment, as the current
+	 * implementation of the timestamp caching and updating is not safe
+	 * against concurrent readers for calls to store_vblank() with a bump
+	 * of anything but +1. A bump != 1 would very likely return corrupted
+	 * timestamps to userspace, because the same slot in the cache could
+	 * be concurrently written by store_vblank() and read by one of those
+	 * readers without the read-retry logic detecting the collision.
+	 *
+	 * Concurrent readers can exist when we are called from the
+	 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
+	 * irq callers. However, all those calls to us are happening with the
+	 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
+	 * can't increase while we are executing. Therefore a zero refcount at
+	 * this point is safe for arbitrary counter bumps if we are called
+	 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
+	 * we must also accept a refcount of 1, as whenever we are called from
+	 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
+	 * we must let that one pass through in order to not lose vblank counts
+	 * during vblank irq off - which would completely defeat the whole
+	 * point of this routine.
+	 *
+	 * Whenever we are called from vblank irq, we have to assume concurrent
+	 * readers exist or can show up any time during our execution, even if
+	 * the refcount is currently zero, as vblank irqs are usually only
+	 * enabled due to the presence of readers, and because when we are called
+	 * from vblank irq we can't hold the vbl_lock to protect us from sudden
+	 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
+	 * called from vblank irq.
+	 */
+	if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
+	    (flags & DRM_CALLED_FROM_VBLIRQ))) {
+		DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u "
+			      "refcount %u, vblirq %u\n", pipe, diff,
+			      atomic_read(&vblank->refcount),
+			      (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
+		diff = 1;
+	}
+
 	DRM_DEBUG_VBL("updating vblank count on crtc %u:"
 		      " current=%u, diff=%u, hw=%u hw_last=%u\n",
 		      pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1316,7 +1374,13 @@
 	spin_lock_irqsave(&dev->event_lock, irqflags);
 
 	spin_lock(&dev->vbl_lock);
-	vblank_disable_and_save(dev, pipe);
+	DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+		      pipe, vblank->enabled, vblank->inmodeset);
+
+	/* Avoid redundant vblank disables without previous drm_vblank_on(). */
+	if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
+		vblank_disable_and_save(dev, pipe);
+
 	wake_up(&vblank->queue);
 
 	/*
@@ -1418,6 +1482,9 @@
 		return;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+		      pipe, vblank->enabled, vblank->inmodeset);
+
 	/* Drop our private "prevent drm_vblank_get" refcount */
 	if (vblank->inmodeset) {
 		atomic_dec(&vblank->refcount);
@@ -1430,8 +1497,7 @@
 	 * re-enable interrupts if there are users left, or the
 	 * user wishes vblank interrupts to be enabled all the time.
 	 */
-	if (atomic_read(&vblank->refcount) != 0 ||
-	    (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
+	if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
 		WARN_ON(drm_vblank_enable(dev, pipe));
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -1526,6 +1592,7 @@
 	if (vblank->inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		dev->vblank_disable_allowed = true;
+		drm_reset_vblank_timestamp(dev, pipe);
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
 		if (vblank->inmodeset & 0x2)
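
The two clamp rules added above can be condensed into one predicate; the sketch below is for illustration only and is not part of the patch:

static bool example_must_clamp_bump(struct drm_vblank_crtc *vblank,
				    u32 diff, unsigned long flags)
{
	if (diff <= 1)
		return false;
	/* Hardware counter may reset inside a pre-/post-modeset window. */
	if (vblank->inmodeset & 0x2)
		return true;
	/* Concurrent timestamp readers may race with store_vblank(). */
	return atomic_read(&vblank->refcount) > 1 ||
	       (flags & DRM_CALLED_FROM_VBLIRQ);
}
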
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83efca9..f17d392 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
 config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
-	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+	depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select FB_CFB_FILLRECT
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1bf6a21..162ab93 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -93,7 +93,7 @@
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 		return -EPERM;
 
-	if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
+	if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
 		val = VIDINTCON0_INTEN;
 		if (ctx->out_type == IFTYPE_I80)
 			val |= VIDINTCON0_FRAMEDONE;
@@ -402,8 +402,6 @@
 		decon_enable_vblank(ctx->crtc);
 
 	decon_commit(ctx->crtc);
-
-	set_bit(BIT_SUSPENDED, &ctx->flags);
 }
 
 static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -582,9 +580,9 @@
 static int exynos5433_decon_suspend(struct device *dev)
 {
 	struct decon_context *ctx = dev_get_drvdata(dev);
-	int i;
+	int i = ARRAY_SIZE(decon_clks_name);
 
-	for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
+	while (--i >= 0)
 		clk_disable_unprepare(ctx->clks[i]);
 
 	return 0;
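
The suspend change above switches to counting down so that the clocks are released in the reverse order of how resume enabled them, which is the usual teardown convention. A generic sketch of the idiom, with hypothetical names:

static void example_disable_clocks(struct clk *clks[], int nclks)
{
	int i = nclks;

	/* Undo clk_prepare_enable() calls in reverse order. */
	while (--i >= 0)
		clk_disable_unprepare(clks[i]);
}
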
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index b79c316..673164b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1392,7 +1392,7 @@
 static int exynos_dp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
+	struct device_node *np = NULL, *endpoint = NULL;
 	struct exynos_dp_device *dp;
 	int ret;
 
@@ -1404,41 +1404,36 @@
 	platform_set_drvdata(pdev, dp);
 
 	/* This is for the backward compatibility. */
-	panel_node = of_parse_phandle(dev->of_node, "panel", 0);
-	if (panel_node) {
-		dp->panel = of_drm_find_panel(panel_node);
-		of_node_put(panel_node);
+	np = of_parse_phandle(dev->of_node, "panel", 0);
+	if (np) {
+		dp->panel = of_drm_find_panel(np);
+		of_node_put(np);
 		if (!dp->panel)
 			return -EPROBE_DEFER;
-	} else {
-		endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
-		if (endpoint) {
-			panel_node = of_graph_get_remote_port_parent(endpoint);
-			if (panel_node) {
-				dp->panel = of_drm_find_panel(panel_node);
-				of_node_put(panel_node);
-				if (!dp->panel)
-					return -EPROBE_DEFER;
-			} else {
-				DRM_ERROR("no port node for panel device.\n");
-				return -EINVAL;
-			}
-		}
-	}
-
-	if (endpoint)
 		goto out;
+	}
 
 	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
 	if (endpoint) {
-		bridge_node = of_graph_get_remote_port_parent(endpoint);
-		if (bridge_node) {
-			dp->ptn_bridge = of_drm_find_bridge(bridge_node);
-			of_node_put(bridge_node);
-			if (!dp->ptn_bridge)
-				return -EPROBE_DEFER;
-		} else
-			return -EPROBE_DEFER;
+		np = of_graph_get_remote_port_parent(endpoint);
+		if (np) {
+			/* The remote port can be either a panel or a bridge */
+			dp->panel = of_drm_find_panel(np);
+			if (!dp->panel) {
+				dp->ptn_bridge = of_drm_find_bridge(np);
+				if (!dp->ptn_bridge) {
+					of_node_put(np);
+					return -EPROBE_DEFER;
+				}
+			}
+			of_node_put(np);
+		} else {
+			DRM_ERROR("no remote endpoint device node found.\n");
+			return -EINVAL;
+		}
+	} else {
+		DRM_ERROR("no port endpoint subnode found.\n");
+		return -EINVAL;
 	}
 
 out:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index d84a498..26e81d19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1782,6 +1782,7 @@
 
 	bridge = of_drm_find_bridge(dsi->bridge_node);
 	if (bridge) {
+		encoder->bridge = bridge;
 		drm_bridge_attach(drm_dev, bridge);
 	}
 
@@ -1906,8 +1907,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int exynos_dsi_suspend(struct device *dev)
+static int __maybe_unused exynos_dsi_suspend(struct device *dev)
 {
 	struct drm_encoder *encoder = dev_get_drvdata(dev);
 	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1938,7 +1938,7 @@
 	return 0;
 }
 
-static int exynos_dsi_resume(struct device *dev)
+static int __maybe_unused exynos_dsi_resume(struct device *dev)
 {
 	struct drm_encoder *encoder = dev_get_drvdata(dev);
 	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1972,7 +1972,6 @@
 
 	return ret;
 }
-#endif
 
 static const struct dev_pm_ops exynos_dsi_pm_ops = {
 	SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index f6118ba..8baabd8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@
 	if (vm_size > exynos_gem->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages,
+	ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
 			     exynos_gem->dma_addr, exynos_gem->size,
 			     &exynos_gem->dma_attrs);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index c747824..8a4f4a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1723,7 +1723,7 @@
 		goto err_put_clk;
 	}
 
-	DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
+	DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
 
 	spin_lock_init(&ctx->lock);
 	platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index c17efdb..8dfe6e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1166,7 +1166,7 @@
 		goto err_free_event;
 	}
 
-	cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
+	cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
 
 	if (copy_from_user(cmdlist->data + cmdlist->last,
 				(void __user *)cmd,
@@ -1184,7 +1184,8 @@
 	if (req->cmd_buf_nr) {
 		struct drm_exynos_g2d_cmd *cmd_buf;
 
-		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
+		cmd_buf = (struct drm_exynos_g2d_cmd *)
+				(unsigned long)req->cmd_buf;
 
 		if (copy_from_user(cmdlist->data + cmdlist->last,
 					(void __user *)cmd_buf,
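
The casts fixed above matter on 64-bit kernels: req->cmd and req->cmd_buf are __u64 fields coming from userspace, and casting them through uint32_t silently drops the upper half of the address. Going through unsigned long, whose width matches the pointer size, is the correct conversion. A minimal sketch:

/* Illustration only: recover a user pointer from a __u64 ioctl field. */
static void __user *example_u64_to_user_ptr(__u64 value)
{
	/* unsigned long is pointer-sized on both 32-bit and 64-bit kernels. */
	return (void __user *)(unsigned long)value;
}
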
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 32358c5..26b5e4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@
 		return ERR_PTR(ret);
 	}
 
-	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
+	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
 
 	return exynos_gem;
 }
@@ -335,7 +335,7 @@
 	if (vm_size > exynos_gem->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages,
+	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
 			     exynos_gem->dma_addr, exynos_gem->size,
 			     &exynos_gem->dma_attrs);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7aecd23..5d20da8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@
 		return ret;
 	}
 
-	DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
+	DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
 
 	mutex_init(&ctx->lock);
 	platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67d2423..95eeb91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@
 	 * e.g PAUSE state, queue buf, command control.
 	 */
 	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
-		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
+		DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
 
 		mutex_lock(&ippdrv->cmd_lock);
 		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,8 +388,8 @@
 	}
 	property->prop_id = ret;
 
-	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
-		property->prop_id, property->cmd, (int)ippdrv);
+	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
+		property->prop_id, property->cmd, ippdrv);
 
 	/* stored property information and ippdrv in private data */
 	c_node->property = *property;
@@ -518,7 +518,7 @@
 {
 	int i;
 
-	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+	DRM_DEBUG_KMS("node[%p]\n", m_node);
 
 	if (!m_node) {
 		DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@
 	m_node->buf_id = qbuf->buf_id;
 	INIT_LIST_HEAD(&m_node->list);
 
-	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
+	DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
 	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
 
 	for_each_ipp_planar(i) {
@@ -582,8 +582,8 @@
 
 			buf_info->handles[i] = qbuf->handle[i];
 			buf_info->base[i] = *addr;
-			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
-				      buf_info->base[i], buf_info->handles[i]);
+			DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
+				      &buf_info->base[i], buf_info->handles[i]);
 		}
 	}
 
@@ -664,7 +664,7 @@
 
 	mutex_lock(&c_node->event_lock);
 	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
-		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
+		DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
 
 		/*
 		 * qbuf == NULL condition means all event deletion.
@@ -755,7 +755,7 @@
 
 	/* find memory node from memory list */
 	list_for_each_entry(m_node, head, list) {
-		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
+		DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
 
 		/* compare buffer id */
 		if (m_node->buf_id == qbuf->buf_id)
@@ -772,7 +772,7 @@
 	struct exynos_drm_ipp_ops *ops = NULL;
 	int ret = 0;
 
-	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+	DRM_DEBUG_KMS("node[%p]\n", m_node);
 
 	if (!m_node) {
 		DRM_ERROR("invalid queue node.\n");
@@ -1237,7 +1237,7 @@
 			m_node = list_first_entry(head,
 				struct drm_exynos_ipp_mem_node, list);
 
-			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
+			DRM_DEBUG_KMS("m_node[%p]\n", m_node);
 
 			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
 			if (ret) {
@@ -1610,8 +1610,8 @@
 		}
 		ippdrv->prop_list.ipp_id = ret;
 
-		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
-			count++, (int)ippdrv, ret);
+		DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
+			count++, ippdrv, ret);
 
 		/* store parent device for node */
 		ippdrv->parent_dev = dev;
@@ -1668,7 +1668,7 @@
 
 	file_priv->ipp_dev = dev;
 
-	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
+	DRM_DEBUG_KMS("done priv[%p]\n", dev);
 
 	return 0;
 }
@@ -1685,8 +1685,8 @@
 		mutex_lock(&ippdrv->cmd_lock);
 		list_for_each_entry_safe(c_node, tc_node,
 			&ippdrv->cmd_list, list) {
-			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
-				count++, (int)ippdrv);
+			DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
+				count++, ippdrv);
 
 			if (c_node->filp == file) {
 				/*
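
All of the DRM_DEBUG_KMS() changes in this file follow the same rule: never cast a pointer to int for printing. %p takes the pointer directly, and %pad prints a dma_addr_t through its address, so the output stays correct whether dma_addr_t is 32 or 64 bits wide. A short sketch (hypothetical function, for illustration only):

static void example_debug_print(void *m_node, dma_addr_t base)
{
	/* Pointers go straight to %p ... */
	DRM_DEBUG_KMS("m_node[%p]\n", m_node);
	/* ... and dma_addr_t values are passed by address for %pad. */
	DRM_DEBUG_KMS("base[%pad]\n", &base);
}
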
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 4eaef36..9869d70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -18,6 +18,7 @@
 #include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/clk.h>
+#include <linux/component.h>
 #include <drm/drmP.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
@@ -306,9 +307,9 @@
 	return ret;
 }
 
-void mic_disable(struct drm_bridge *bridge) { }
+static void mic_disable(struct drm_bridge *bridge) { }
 
-void mic_post_disable(struct drm_bridge *bridge)
+static void mic_post_disable(struct drm_bridge *bridge)
 {
 	struct exynos_mic *mic = bridge->driver_private;
 	int i;
@@ -328,7 +329,7 @@
 	mutex_unlock(&mic_mutex);
 }
 
-void mic_pre_enable(struct drm_bridge *bridge)
+static void mic_pre_enable(struct drm_bridge *bridge)
 {
 	struct exynos_mic *mic = bridge->driver_private;
 	int ret, i;
@@ -371,11 +372,35 @@
 	mutex_unlock(&mic_mutex);
 }
 
-void mic_enable(struct drm_bridge *bridge) { }
+static void mic_enable(struct drm_bridge *bridge) { }
 
-void mic_destroy(struct drm_bridge *bridge)
+static const struct drm_bridge_funcs mic_bridge_funcs = {
+	.disable = mic_disable,
+	.post_disable = mic_post_disable,
+	.pre_enable = mic_pre_enable,
+	.enable = mic_enable,
+};
+
+static int exynos_mic_bind(struct device *dev, struct device *master,
+			   void *data)
 {
-	struct exynos_mic *mic = bridge->driver_private;
+	struct exynos_mic *mic = dev_get_drvdata(dev);
+	int ret;
+
+	mic->bridge.funcs = &mic_bridge_funcs;
+	mic->bridge.of_node = dev->of_node;
+	mic->bridge.driver_private = mic;
+	ret = drm_bridge_add(&mic->bridge);
+	if (ret)
+		DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
+
+	return ret;
+}
+
+static void exynos_mic_unbind(struct device *dev, struct device *master,
+			      void *data)
+{
+	struct exynos_mic *mic = dev_get_drvdata(dev);
 	int i;
 
 	mutex_lock(&mic_mutex);
@@ -387,16 +412,16 @@
 
 already_disabled:
 	mutex_unlock(&mic_mutex);
+
+	drm_bridge_remove(&mic->bridge);
 }
 
-static const struct drm_bridge_funcs mic_bridge_funcs = {
-	.disable = mic_disable,
-	.post_disable = mic_post_disable,
-	.pre_enable = mic_pre_enable,
-	.enable = mic_enable,
+static const struct component_ops exynos_mic_component_ops = {
+	.bind	= exynos_mic_bind,
+	.unbind	= exynos_mic_unbind,
 };
 
-int exynos_mic_probe(struct platform_device *pdev)
+static int exynos_mic_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct exynos_mic *mic;
@@ -435,17 +460,8 @@
 		goto err;
 	}
 
-	mic->bridge.funcs = &mic_bridge_funcs;
-	mic->bridge.of_node = dev->of_node;
-	mic->bridge.driver_private = mic;
-	ret = drm_bridge_add(&mic->bridge);
-	if (ret) {
-		DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
-		goto err;
-	}
-
 	for (i = 0; i < NUM_CLKS; i++) {
-		mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]);
+		mic->clks[i] = devm_clk_get(dev, clk_names[i]);
 		if (IS_ERR(mic->clks[i])) {
 			DRM_ERROR("mic: Failed to get clock (%s)\n",
 								clk_names[i]);
@@ -454,7 +470,10 @@
 		}
 	}
 
+	platform_set_drvdata(pdev, mic);
+
 	DRM_DEBUG_KMS("MIC has been probed\n");
+	return component_add(dev, &exynos_mic_component_ops);
 
 err:
 	return ret;
@@ -462,14 +481,7 @@
 
 static int exynos_mic_remove(struct platform_device *pdev)
 {
-	struct exynos_mic *mic = platform_get_drvdata(pdev);
-	int i;
-
-	drm_bridge_remove(&mic->bridge);
-
-	for (i = NUM_CLKS - 1; i > -1; i--)
-		clk_put(mic->clks[i]);
-
+	component_del(&pdev->dev, &exynos_mic_component_ops);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index bea0f78..ce59f44 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -754,7 +754,7 @@
 		goto err_ippdrv_register;
 	}
 
-	DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv);
+	DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
 
 	platform_set_drvdata(pdev, rot);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 62ac4e5..b605bd7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -223,7 +223,7 @@
 	}
 }
 
-static int vidi_show_connection(struct device *dev,
+static ssize_t vidi_show_connection(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -238,7 +238,7 @@
 	return rc;
 }
 
-static int vidi_store_connection(struct device *dev,
+static ssize_t vidi_store_connection(struct device *dev,
 				struct device_attribute *attr,
 				const char *buf, size_t len)
 {
@@ -294,7 +294,9 @@
 	}
 
 	if (vidi->connection) {
-		struct edid *raw_edid  = (struct edid *)(uint32_t)vidi->edid;
+		struct edid *raw_edid;
+
+		raw_edid = (struct edid *)(unsigned long)vidi->edid;
 		if (!drm_edid_is_valid(raw_edid)) {
 			DRM_DEBUG_KMS("edid data is invalid.\n");
 			return -EINVAL;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index b5fbc1c..0a5a600 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1289,8 +1289,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_mixer_suspend(struct device *dev)
+static int __maybe_unused exynos_mixer_suspend(struct device *dev)
 {
 	struct mixer_context *ctx = dev_get_drvdata(dev);
 	struct mixer_resources *res = &ctx->mixer_res;
@@ -1306,7 +1305,7 @@
 	return 0;
 }
 
-static int exynos_mixer_resume(struct device *dev)
+static int __maybe_unused exynos_mixer_resume(struct device *dev)
 {
 	struct mixer_context *ctx = dev_get_drvdata(dev);
 	struct mixer_resources *res = &ctx->mixer_res;
@@ -1342,7 +1341,6 @@
 
 	return 0;
 }
-#endif
 
 static const struct dev_pm_ops exynos_mixer_pm_ops = {
 	SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 533d1e3..a02112b 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -136,6 +136,7 @@
 	case ADV7511_REG_BKSV(3):
 	case ADV7511_REG_BKSV(4):
 	case ADV7511_REG_DDC_STATUS:
+	case ADV7511_REG_EDID_READ_CTRL:
 	case ADV7511_REG_BSTATUS(0):
 	case ADV7511_REG_BSTATUS(1):
 	case ADV7511_REG_CHIP_ID_HIGH:
@@ -362,24 +363,31 @@
 {
 	adv7511->current_edid_segment = -1;
 
-	regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-		     ADV7511_INT0_EDID_READY);
-	regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-		     ADV7511_INT1_DDC_ERROR);
 	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
 			   ADV7511_POWER_POWER_DOWN, 0);
+	if (adv7511->i2c_main->irq) {
+		/*
+		 * Documentation says the INT_ENABLE registers are reset in
+		 * POWER_DOWN mode. My 7511w preserved the bits, however.
+		 * Still, let's be safe and stick to the documentation.
+		 */
+		regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+			     ADV7511_INT0_EDID_READY);
+		regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+			     ADV7511_INT1_DDC_ERROR);
+	}
 
 	/*
-	 * Per spec it is allowed to pulse the HDP signal to indicate that the
+	 * Per spec it is allowed to pulse the HPD signal to indicate that the
 	 * EDID information has changed. Some monitors do this when they wakeup
-	 * from standby or are enabled. When the HDP goes low the adv7511 is
+	 * from standby or are enabled. When the HPD goes low the adv7511 is
 	 * reset and the outputs are disabled which might cause the monitor to
-	 * go to standby again. To avoid this we ignore the HDP pin for the
+	 * go to standby again. To avoid this we ignore the HPD pin for the
 	 * first few seconds after enabling the output.
 	 */
 	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-			   ADV7511_REG_POWER2_HDP_SRC_MASK,
-			   ADV7511_REG_POWER2_HDP_SRC_NONE);
+			   ADV7511_REG_POWER2_HPD_SRC_MASK,
+			   ADV7511_REG_POWER2_HPD_SRC_NONE);
 
 	/*
 	 * Most of the registers are reset during power down or when HPD is low.
@@ -413,9 +421,9 @@
 	if (ret < 0)
 		return false;
 
-	if (irq0 & ADV7511_INT0_HDP) {
+	if (irq0 & ADV7511_INT0_HPD) {
 		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-			     ADV7511_INT0_HDP);
+			     ADV7511_INT0_HPD);
 		return true;
 	}
 
@@ -438,7 +446,7 @@
 	regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
 	regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
-	if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+	if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
 		drm_helper_hpd_irq_event(adv7511->encoder->dev);
 
 	if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -567,12 +575,14 @@
 
 	/* Reading the EDID only works if the device is powered */
 	if (!adv7511->powered) {
-		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-			     ADV7511_INT0_EDID_READY);
-		regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-			     ADV7511_INT1_DDC_ERROR);
 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
 				   ADV7511_POWER_POWER_DOWN, 0);
+		if (adv7511->i2c_main->irq) {
+			regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+				     ADV7511_INT0_EDID_READY);
+			regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+				     ADV7511_INT1_DDC_ERROR);
+		}
 		adv7511->current_edid_segment = -1;
 	}
 
@@ -638,10 +648,10 @@
 		if (adv7511->status == connector_status_connected)
 			status = connector_status_disconnected;
 	} else {
-		/* Renable HDP sensing */
+		/* Re-enable HPD sensing */
 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-				   ADV7511_REG_POWER2_HDP_SRC_MASK,
-				   ADV7511_REG_POWER2_HDP_SRC_BOTH);
+				   ADV7511_REG_POWER2_HPD_SRC_MASK,
+				   ADV7511_REG_POWER2_HPD_SRC_BOTH);
 	}
 
 	adv7511->status = status;
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 6599ed5..38515b3 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -90,7 +90,7 @@
 #define ADV7511_CSC_ENABLE			BIT(7)
 #define ADV7511_CSC_UPDATE_MODE			BIT(5)
 
-#define ADV7511_INT0_HDP			BIT(7)
+#define ADV7511_INT0_HPD			BIT(7)
 #define ADV7511_INT0_VSYNC			BIT(5)
 #define ADV7511_INT0_AUDIO_FIFO_FULL		BIT(4)
 #define ADV7511_INT0_EDID_READY			BIT(2)
@@ -157,11 +157,11 @@
 #define ADV7511_PACKET_ENABLE_SPARE2		BIT(1)
 #define ADV7511_PACKET_ENABLE_SPARE1		BIT(0)
 
-#define ADV7511_REG_POWER2_HDP_SRC_MASK		0xc0
-#define ADV7511_REG_POWER2_HDP_SRC_BOTH		0x00
-#define ADV7511_REG_POWER2_HDP_SRC_HDP		0x40
-#define ADV7511_REG_POWER2_HDP_SRC_CEC		0x80
-#define ADV7511_REG_POWER2_HDP_SRC_NONE		0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_MASK		0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH		0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD		0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC		0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE		0xc0
 #define ADV7511_REG_POWER2_TDMS_ENABLE		BIT(4)
 #define ADV7511_REG_POWER2_GATE_INPUT_CLK	BIT(0)
 
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index fcd77b2..051eab3 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -10,7 +10,6 @@
 	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
 	select TMPFS
-	select STOP_MACHINE
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3ac616d..f357058 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -501,7 +501,9 @@
 				WARN_ON(!IS_SKYLAKE(dev) &&
 					!IS_KABYLAKE(dev));
 			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
+				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+				    pch->subsystem_vendor == 0x1af4 &&
+				    pch->subsystem_device == 0x1100)) {
 				dev_priv->pch_type = intel_virt_detect_pch(dev);
 			} else
 				continue;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f0f75d7..e7cd311 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1988,6 +1988,9 @@
 #define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
+	unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
 	 * of pages before to binding them into the GTT, and put_pages() is
@@ -2003,6 +2006,7 @@
 	 */
 	int (*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ddc21d4..bb44bad 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4425,6 +4425,7 @@
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
@@ -5261,7 +5262,7 @@
 	struct page *page;
 
 	/* Only default objects have per-page dirty tracking */
-	if (WARN_ON(obj->ops != &i915_gem_object_ops))
+	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
 		return NULL;
 
 	page = i915_gem_object_get_page(obj, n);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 19fb0bdd..59e45b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -789,9 +789,10 @@
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
-	.dmabuf_export = i915_gem_userptr_dmabuf_export,
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
+	.dmabuf_export = i915_gem_userptr_dmabuf_export,
 	.release = i915_gem_userptr_release,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 007ae83..4897728 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3287,19 +3287,20 @@
 
 #define PORT_HOTPLUG_STAT	_MMIO(dev_priv->info.display_mmio_offset + 0x61114)
 /*
- * HDMI/DP bits are gen4+
+ * HDMI/DP bits are g4x+
  *
  * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
  * Please check the detailed lore in the commit message for experimental
  * evidence.
  */
-#define   PORTD_HOTPLUG_LIVE_STATUS_G4X		(1 << 29)
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define   PORTD_HOTPLUG_LIVE_STATUS_GM45	(1 << 29)
+#define   PORTC_HOTPLUG_LIVE_STATUS_GM45	(1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_GM45	(1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define   PORTD_HOTPLUG_LIVE_STATUS_G4X		(1 << 27)
 #define   PORTC_HOTPLUG_LIVE_STATUS_G4X		(1 << 28)
-#define   PORTB_HOTPLUG_LIVE_STATUS_G4X		(1 << 27)
-/* VLV DP/HDMI bits again match Bspec */
-#define   PORTD_HOTPLUG_LIVE_STATUS_VLV		(1 << 27)
-#define   PORTC_HOTPLUG_LIVE_STATUS_VLV		(1 << 28)
-#define   PORTB_HOTPLUG_LIVE_STATUS_VLV		(1 << 29)
+#define   PORTB_HOTPLUG_LIVE_STATUS_G4X		(1 << 29)
 #define   PORTD_HOTPLUG_INT_STATUS		(3 << 21)
 #define   PORTD_HOTPLUG_INT_LONG_PULSE		(2 << 21)
 #define   PORTD_HOTPLUG_INT_SHORT_PULSE		(1 << 21)
@@ -7514,7 +7515,7 @@
 #define  DPLL_CFGCR2_PDIV_7 (4<<2)
 #define  DPLL_CFGCR2_CENTRAL_FREQ_MASK	(3)
 
-#define DPLL_CFGCR1(id)	_MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
+#define DPLL_CFGCR1(id)	_MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
 #define DPLL_CFGCR2(id)	_MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
 
 /* BXT display engine PLL */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a2aa09c..a8af594 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -49,7 +49,7 @@
 		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
 		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
 		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-	} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+	} else if (INTEL_INFO(dev)->gen <= 4) {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
 		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@@ -84,7 +84,7 @@
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-	} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+	} else if (INTEL_INFO(dev)->gen <= 4) {
 		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6408e5..54a165b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1589,7 +1589,8 @@
 			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
 			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
 			 wrpll_params.central_freq;
-	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		   intel_encoder->type == INTEL_OUTPUT_DP_MST) {
 		switch (crtc_state->port_clock / 2) {
 		case 81000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2f00828..5feb657 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2946,7 +2946,7 @@
 	struct i915_vma *vma;
 	u64 offset;
 
-	intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
+	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
 				intel_plane->base.state);
 
 	vma = i915_gem_obj_to_ggtt_view(obj, &view);
@@ -12075,11 +12075,21 @@
 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
 	}
 
-	/* Clamp bpp to 8 on screens without EDID 1.4 */
-	if (connector->base.display_info.bpc == 0 && bpp > 24) {
-		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-			      bpp);
-		pipe_config->pipe_bpp = 24;
+	/* Clamp bpp to default limit on screens without EDID 1.4 */
+	if (connector->base.display_info.bpc == 0) {
+		int type = connector->base.connector_type;
+		int clamp_bpp = 24;
+
+		/* Fall back to 18 bpp when DP sink capability is unknown. */
+		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+		    type == DRM_MODE_CONNECTOR_eDP)
+			clamp_bpp = 18;
+
+		if (bpp > clamp_bpp) {
+			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+				      bpp, clamp_bpp);
+			pipe_config->pipe_bpp = clamp_bpp;
+		}
 	}
 }
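
The clamp values above are totals across the three colour channels, so the new DP/eDP fallback amounts to assuming a 6 bpc sink when nothing better is known from the EDID. A quick reference (not part of the patch):

/*
 * pipe_bpp counts bits per pixel over all three channels:
 *
 *   18 bpp = 6 bpc   (conservative DP/eDP fallback when the sink's
 *                     capability is unknown)
 *   24 bpp = 8 bpc   (default clamp for other connector types)
 *   30 bpp = 10 bpc, 36 bpp = 12 bpc (deep colour modes)
 */
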
 
@@ -13883,11 +13893,12 @@
 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	bool can_position = false;
 
-	/* use scaler when colorkey is not required */
-	if (INTEL_INFO(plane->dev)->gen >= 9 &&
-	    state->ckey.flags == I915_SET_COLORKEY_NONE) {
-		min_scale = 1;
-		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+	if (INTEL_INFO(plane->dev)->gen >= 9) {
+		/* use scaler when colorkey is not required */
+		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+			min_scale = 1;
+			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+		}
 		can_position = true;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 796e3d3..1bbd67b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4493,20 +4493,20 @@
 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
 }
 
-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
-				       struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+					struct intel_digital_port *port)
 {
 	u32 bit;
 
 	switch (port->port) {
 	case PORT_B:
-		bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
 		break;
 	case PORT_C:
-		bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
 		break;
 	case PORT_D:
-		bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
 		break;
 	default:
 		MISSING_CASE(port->port);
@@ -4558,8 +4558,8 @@
 		return cpt_digital_port_connected(dev_priv, port);
 	else if (IS_BROXTON(dev_priv))
 		return bxt_digital_port_connected(dev_priv, port);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		return vlv_digital_port_connected(dev_priv, port);
+	else if (IS_GM45(dev_priv))
+		return gm45_digital_port_connected(dev_priv, port);
 	else
 		return g4x_digital_port_connected(dev_priv, port);
 }
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 8888793..0b8eefc 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -215,27 +215,46 @@
 	}
 }
 
+/*
+ * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * or 1.2 devices that support it, Training Pattern 2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
+{
+	u32 training_pattern = DP_TRAINING_PATTERN_2;
+	bool source_tps3, sink_tps3;
+
+	/*
+	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
+	 * also mandatory for downstream devices that support HBR2. However, not
+	 * all sinks follow the spec.
+	 *
+	 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
+	 * supported by the source but still not enabled.
+	 */
+	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
+	sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
+
+	if (source_tps3 && sink_tps3) {
+		training_pattern = DP_TRAINING_PATTERN_3;
+	} else if (intel_dp->link_rate == 540000) {
+		if (!source_tps3)
+			DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+		if (!sink_tps3)
+			DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+	}
+
+	return training_pattern;
+}
+
 static void
 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
 	bool channel_eq = false;
 	int tries, cr_tries;
-	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+	u32 training_pattern;
 
-	/*
-	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
-	 *
-	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
-	 * also mandatory for downstream devices that support HBR2.
-	 *
-	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
-	 * supported but still not enabled.
-	 */
-	if (intel_dp_source_supports_hbr2(intel_dp) &&
-	    drm_dp_tps3_supported(intel_dp->dpcd))
-		training_pattern = DP_TRAINING_PATTERN_3;
-	else if (intel_dp->link_rate == 540000)
-		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
+	training_pattern = intel_dp_training_pattern(intel_dp);
 
 	/* channel equalization */
 	if (!intel_dp_set_link_train(intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index a5e99ac..e8113ad 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -204,10 +204,28 @@
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (dev_priv->vbt.dsi.seq_version >= 3)
+		data++;
+
 	gpio = *data++;
 
 	/* pull up/down */
-	action = *data++;
+	action = *data++ & 1;
+
+	if (gpio >= ARRAY_SIZE(gtable)) {
+		DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+		goto out;
+	}
+
+	if (!IS_VALLEYVIEW(dev_priv)) {
+		DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+		goto out;
+	}
+
+	if (dev_priv->vbt.dsi.seq_version >= 3) {
+		DRM_DEBUG_KMS("GPIO element v3 not supported\n");
+		goto out;
+	}
 
 	function = gtable[gpio].function_reg;
 	pad = gtable[gpio].pad_reg;
@@ -226,6 +244,7 @@
 	vlv_gpio_nc_write(dev_priv, pad, val);
 	mutex_unlock(&dev_priv->sb_lock);
 
+out:
 	return data;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 25254b5..deb8282 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -683,7 +683,7 @@
 	return 0;
 
 err:
-	while (--pin) {
+	while (pin--) {
 		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
 			continue;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3aa6147..f1fa756 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1707,6 +1707,7 @@
 	if (flush_domains) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eb5fa05..a234687 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1783,16 +1783,20 @@
 				   const struct intel_plane_state *pstate,
 				   uint32_t mem_value)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	/*
+	 * We treat the cursor plane as always-on for the purposes of watermark
+	 * calculation.  Until we have two-stage watermark programming merged,
+	 * this is necessary to avoid flickering.
+	 */
+	int cpp = 4;
+	int width = pstate->visible ? pstate->base.crtc_w : 64;
 
-	if (!cstate->base.active || !pstate->visible)
+	if (!cstate->base.active)
 		return 0;
 
 	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
 			      cstate->base.adjusted_mode.crtc_htotal,
-			      drm_rect_width(&pstate->dst),
-			      bpp,
-			      mem_value);
+			      width, cpp, mem_value);
 }
 
 /* Only for WM_LP. */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 339701d..40c6aff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -331,6 +331,7 @@
 	if (flush_domains) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
 	if (invalidate_domains) {
@@ -403,6 +404,7 @@
 	if (flush_domains) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
 	if (invalidate_domains) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 78f520d..e3acc35 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1520,7 +1520,7 @@
 				    DMA_BIDIRECTIONAL);
 
 		if (dma_mapping_error(pdev, addr)) {
-			while (--i) {
+			while (i--) {
 				dma_unmap_page(pdev, ttm_dma->dma_address[i],
 					       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				ttm_dma->dma_address[i] = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24be27d..20935eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,10 +635,6 @@
 		nv_crtc->lut.depth = 0;
 	}
 
-	/* Make sure that drm and hw vblank irqs get resumed if needed. */
-	for (head = 0; head < dev->mode_config.num_crtc; head++)
-		drm_vblank_on(dev, head);
-
 	/* This should ensure we don't hit a locking problem when someone
 	 * wakes us up via a connector.  We should never go into suspend
 	 * while the display is on anyways.
@@ -648,6 +644,10 @@
 
 	drm_helper_resume_force_mode(dev);
 
+	/* Make sure that drm and hw vblank irqs get resumed if needed. */
+	for (head = 0; head < dev->mode_config.num_crtc; head++)
+		drm_vblank_on(dev, head);
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577..7c2e782 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@
 		       cmd->command_size))
 		return -EFAULT;
 
-	reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+	reloc_info = kmalloc_array(cmd->relocs_num,
+				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
 	if (!reloc_info)
 		return -ENOMEM;
 
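
Replacing the open-coded kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, ...) with kmalloc_array() matters because relocs_num comes from userspace: an attacker-chosen count can overflow the multiplication and yield an undersized buffer. Roughly, kmalloc_array() refuses such requests; a simplified userspace model of that check (the real helper lives in include/linux/slab.h and also takes GFP flags):

#include <stdint.h>
#include <stdlib.h>

/*
 * Simplified model of kmalloc_array(): fail the allocation outright if
 * n * size would overflow, instead of silently allocating a short buffer.
 */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	/* Would overflow size_t, so the allocation is refused. */
	void *p = alloc_array((size_t)-1, 16);

	return p ? 1 : 0;
}
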
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 3d031b5..9f029dd 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -68,5 +68,5 @@
 		       struct vm_area_struct *area)
 {
 	WARN_ONCE(1, "not implemented");
-	return ENOSYS;
+	return -ENOSYS;
 }
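
The qxl_prime fix follows the kernel convention that an int-returning function signals failure with a negative errno; the positive ENOSYS returned before looks like a non-error to any caller that only checks for ret < 0. A small standalone illustration of the convention:

#include <errno.h>
#include <stdio.h>

/* Stub modelled on the change: "not implemented" reported the kernel way. */
static int stub_mmap(void)
{
	return -ENOSYS;
}

int main(void)
{
	int err = stub_mmap();

	/* Callers typically treat only negative values as failure, so a
	 * positive ENOSYS would have been mistaken for success. */
	if (err < 0)
		fprintf(stderr, "mmap not implemented (%d)\n", err);
	return 0;
}
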
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 84d4563..fb6ad14 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
 #include "radeon.h"
 #include "radeon_trace.h"
 
@@ -245,6 +246,12 @@
 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
 			      "better performance thanks to write-combining\n");
 	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+	/* For architectures that don't support WC memory,
+	 * mask out the WC flag from the BO
+	 */
+	if (!drm_arch_can_wc_memory())
+		bo->flags &= ~RADEON_GEM_GTT_WC;
 #endif
 
 	radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 460c8f2..248c5a9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -276,8 +276,12 @@
 	if (rdev->irq.installed) {
 		for (i = 0; i < rdev->num_crtc; i++) {
 			if (rdev->pm.active_crtcs & (1 << i)) {
-				rdev->pm.req_vblank |= (1 << i);
-				drm_vblank_get(rdev->ddev, i);
+				/* This can fail if a modeset is in progress */
+				if (drm_vblank_get(rdev->ddev, i) == 0)
+					rdev->pm.req_vblank |= (1 << i);
+				else
+					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
+							 i);
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896..197b157 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@
 			/* see if we can skip over some allocations */
 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_ref(fences[i]);
+
 		spin_unlock(&sa_manager->wq.lock);
 		r = radeon_fence_wait_any(rdev, fences, false);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_unref(&fences[i]);
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT) {
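
The radeon_sa change pins each candidate fence with radeon_fence_ref() before sa_manager->wq.lock is dropped, so radeon_fence_wait_any() cannot race with the fences being freed, and it releases the temporary references once the wait returns. A hedged, generic sketch of that pin/unlock/wait/unpin pattern; obj_get(), obj_put() and wait_on_objs() are hypothetical stand-ins, not radeon APIs:

#include <linux/spinlock.h>

struct obj;
struct mgr { spinlock_t lock; };

void obj_get(struct obj *o);
void obj_put(struct obj *o);
int wait_on_objs(struct obj **objs, int n);

/*
 * Sketch only: keep objects alive across a sleeping wait that has to run
 * without the spinlock held.  Assumes mgr->lock is held on entry and on
 * return, mirroring the radeon_sa code.
 */
static int wait_unlocked(struct mgr *mgr, struct obj **objs, int n)
{
	int i, r;

	for (i = 0; i < n; i++)
		obj_get(objs[i]);		/* pin while still protected */

	spin_unlock(&mgr->lock);
	r = wait_on_objs(objs, n);		/* may sleep; objects stay valid */
	for (i = 0; i < n; i++)
		obj_put(objs[i]);		/* drop the temporary references */
	spin_lock(&mgr->lock);

	return r;
}
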
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e343074..e06ac54 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@
 						       0, PAGE_SIZE,
 						       PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
-			while (--i) {
+			while (i--) {
 				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 				gtt->ttm.dma_address[i] = 0;
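
The three "while (--i)" to "while (i--)" conversions above (intel_i2c.c, nouveau_bo.c and radeon_ttm.c) fix the same unwind bug: with pre-decrement the cleanup loop never reaches index 0, and if the failure happened at index 0 it runs with a negative index. A minimal userspace illustration, not taken from any of the drivers, of why post-decrement is the right shape for "undo everything done so far":

#include <stdio.h>

#define N 4

int main(void)
{
	int mapped[N] = { 0 };
	int i;

	/* Pretend the i == 2 iteration fails after 0 and 1 succeeded. */
	for (i = 0; i < N; i++) {
		if (i == 2)
			break;
		mapped[i] = 1;
	}

	/* Post-decrement visits i-1 .. 0 and then stops cleanly. */
	while (i--) {
		printf("unmapping slot %d\n", i);
		mapped[i] = 0;
	}

	/*
	 * "while (--i)" would have stopped before slot 0, leaking it, and a
	 * failure at i == 0 would have run the body with i == -1.
	 */
	return 0;
}
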
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3e..22278bc 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@
 	struct drm_gem_cma_object *cma_obj;
 
 	if (size == 0)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	/* First, try to get a vc4_bo from the kernel BO cache. */
 	if (from_cache) {
@@ -237,7 +237,7 @@
 		if (IS_ERR(cma_obj)) {
 			DRM_ERROR("Failed to allocate from CMA:\n");
 			vc4_bo_stats_dump(vc4);
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 		}
 	}
 
@@ -259,8 +259,8 @@
 		args->size = args->pitch * args->height;
 
 	bo = vc4_bo_create(dev, args->size, false);
-	if (!bo)
-		return -ENOMEM;
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
 
 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 	drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@
 	 * get zeroed, and that might leak data between users.
 	 */
 	bo = vc4_bo_create(dev, args->size, false);
-	if (!bo)
-		return -ENOMEM;
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
 
 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 	drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@
 	}
 
 	bo = vc4_bo_create(dev, args->size, true);
-	if (!bo)
-		return -ENOMEM;
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
 
 	ret = copy_from_user(bo->base.vaddr,
 			     (void __user *)(uintptr_t)args->data,
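
Having vc4_bo_create() return ERR_PTR(-EINVAL) or ERR_PTR(-ENOMEM) instead of NULL lets callers stop guessing -ENOMEM and propagate the exact reason with PTR_ERR(). A short hedged sketch of the idiom with generic names (thing_create() is illustrative, not a vc4 function):

#include <linux/err.h>
#include <linux/slab.h>

struct thing {
	size_t size;
	unsigned char data[];
};

/* Return a valid object or an errno encoded in the pointer, never NULL. */
static struct thing *thing_create(size_t size)
{
	struct thing *t;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	t = kzalloc(sizeof(*t) + size, GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->size = size;
	return t;
}

/* Caller side: IS_ERR() detects the encoded error, PTR_ERR() decodes it. */
static int thing_user(void)
{
	struct thing *t = thing_create(64);

	if (IS_ERR(t))
		return PTR_ERR(t);

	kfree(t);
	return 0;
}
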
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 080865e..51a6333 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@
 	struct vc4_bo *overflow_mem;
 	struct work_struct overflow_mem_work;
 
+	int power_refcount;
+
+	/* Mutex controlling the power refcount. */
+	struct mutex power_lock;
+
 	struct {
-		uint32_t last_ct0ca, last_ct1ca;
 		struct timer_list timer;
 		struct work_struct reset_work;
 	} hangcheck;
@@ -142,6 +146,7 @@
 };
 
 struct vc4_v3d {
+	struct vc4_dev *vc4;
 	struct platform_device *pdev;
 	void __iomem *regs;
 };
@@ -192,6 +197,11 @@
 	/* Sequence number for this bin/render job. */
 	uint64_t seqno;
 
+	/* Last current addresses the hardware was processing when the
+	 * hangcheck timer checked on us.
+	 */
+	uint32_t last_ct0ca, last_ct1ca;
+
 	/* Kernel-space copy of the ioctl arguments */
 	struct drm_vc4_submit_cl *args;
 
@@ -434,7 +444,6 @@
 extern struct platform_driver vc4_v3d_driver;
 int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
 int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
-int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
 
 /* vc4_validate.c */
 int
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a..202aa15 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/device.h>
 #include <linux/io.h>
 
@@ -228,8 +229,16 @@
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
 	DRM_INFO("Resetting GPU.\n");
-	vc4_v3d_set_power(vc4, false);
-	vc4_v3d_set_power(vc4, true);
+
+	mutex_lock(&vc4->power_lock);
+	if (vc4->power_refcount) {
+		/* Power the device off and back on by dropping the runtime PM
+		 * reference and immediately re-acquiring it.
+		 */
+		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
+		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+	}
+	mutex_unlock(&vc4->power_lock);
 
 	vc4_irq_reset(dev);
 
@@ -257,10 +266,17 @@
 	struct drm_device *dev = (struct drm_device *)data;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	uint32_t ct0ca, ct1ca;
+	unsigned long irqflags;
+	struct vc4_exec_info *exec;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	exec = vc4_first_job(vc4);
 
 	/* If idle, we can stop watching for hangs. */
-	if (list_empty(&vc4->job_list))
+	if (!exec) {
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		return;
+	}
 
 	ct0ca = V3D_READ(V3D_CTNCA(0));
 	ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@
 	/* If we've made any progress in execution, rearm the timer
 	 * and wait.
 	 */
-	if (ct0ca != vc4->hangcheck.last_ct0ca ||
-	    ct1ca != vc4->hangcheck.last_ct1ca) {
-		vc4->hangcheck.last_ct0ca = ct0ca;
-		vc4->hangcheck.last_ct1ca = ct1ca;
+	if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
+		exec->last_ct0ca = ct0ca;
+		exec->last_ct1ca = ct1ca;
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		vc4_queue_hangcheck(dev);
 		return;
 	}
 
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
 	/* We've gone too long with no progress, reset.  This has to
 	 * be done from a work struct, since resetting can sleep and
 	 * this timer hook isn't allowed to.
@@ -340,12 +358,7 @@
 	finish_wait(&vc4->job_wait_queue, &wait);
 	trace_vc4_wait_for_seqno_end(dev, seqno);
 
-	if (ret && ret != -ERESTARTSYS) {
-		DRM_ERROR("timeout waiting for render thread idle\n");
-		return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
 static void
@@ -578,9 +591,9 @@
 	}
 
 	bo = vc4_bo_create(dev, exec_size, true);
-	if (!bo) {
+	if (IS_ERR(bo)) {
 		DRM_ERROR("Couldn't allocate BO for binning\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(bo);
 		goto fail;
 	}
 	exec->exec_bo = &bo->base;
@@ -617,6 +630,7 @@
 static void
 vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	unsigned i;
 
 	/* Need the struct lock for drm_gem_object_unreference(). */
@@ -635,6 +649,11 @@
 	}
 	mutex_unlock(&dev->struct_mutex);
 
+	mutex_lock(&vc4->power_lock);
+	if (--vc4->power_refcount == 0)
+		pm_runtime_put(&vc4->v3d->pdev->dev);
+	mutex_unlock(&vc4->power_lock);
+
 	kfree(exec);
 }
 
@@ -746,6 +765,9 @@
 	struct drm_gem_object *gem_obj;
 	struct vc4_bo *bo;
 
+	if (args->pad != 0)
+		return -EINVAL;
+
 	gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (!gem_obj) {
 		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
@@ -772,7 +794,7 @@
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_vc4_submit_cl *args = data;
 	struct vc4_exec_info *exec;
-	int ret;
+	int ret = 0;
 
 	if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
 		DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
@@ -785,6 +807,15 @@
 		return -ENOMEM;
 	}
 
+	mutex_lock(&vc4->power_lock);
+	if (vc4->power_refcount++ == 0)
+		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+	mutex_unlock(&vc4->power_lock);
+	if (ret < 0) {
+		kfree(exec);
+		return ret;
+	}
+
 	exec->args = args;
 	INIT_LIST_HEAD(&exec->unref_list);
 
@@ -839,6 +870,8 @@
 		    (unsigned long)dev);
 
 	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
+
+	mutex_init(&vc4->power_lock);
 }
 
 void
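
The new power_refcount/power_lock pair makes the V3D block's runtime-PM state follow the number of in-flight jobs: the first submitted job powers the unit up, the last completed job lets it suspend again. A hedged sketch of that first-get/last-put pattern with a hypothetical device structure (error unwinding kept minimal, as in the patch; the real code operates on the V3D platform device):

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>

struct my_dev {
	struct mutex power_lock;
	int power_refcount;
	struct device *dev;
};

static int my_power_get(struct my_dev *d)
{
	int ret = 0;

	mutex_lock(&d->power_lock);
	if (d->power_refcount++ == 0)
		ret = pm_runtime_get_sync(d->dev);	/* may resume the HW */
	mutex_unlock(&d->power_lock);

	return ret < 0 ? ret : 0;
}

static void my_power_put(struct my_dev *d)
{
	mutex_lock(&d->power_lock);
	if (--d->power_refcount == 0)
		pm_runtime_put(d->dev);			/* allow it to suspend */
	mutex_unlock(&d->power_lock);
}
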
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e..78a2135 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@
 	struct vc4_bo *bo;
 
 	bo = vc4_bo_create(dev, 256 * 1024, true);
-	if (!bo) {
+	if (IS_ERR(bo)) {
 		DRM_ERROR("Couldn't allocate binner overflow mem\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312..0f12418 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@
 	size += xtiles * ytiles * loop_body_size;
 
 	setup->rcl = &vc4_bo_create(dev, size, true)->base;
-	if (!setup->rcl)
-		return -ENOMEM;
+	if (IS_ERR(setup->rcl))
+		return PTR_ERR(setup->rcl);
 	list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
 		      &exec->unref_list);
 
-	rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
-	rcl_u32(setup,
-		(setup->color_write ? (setup->color_write->paddr +
-				       args->color_write.offset) :
-		 0));
-	rcl_u16(setup, args->width);
-	rcl_u16(setup, args->height);
-	rcl_u16(setup, args->color_write.bits);
-
 	/* The tile buffer gets cleared when the previous tile is stored.  If
 	 * the clear values changed between frames, then the tile buffer has
 	 * stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@
 		rcl_u32(setup, 0); /* no address, since we're in None mode */
 	}
 
+	rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
+	rcl_u32(setup,
+		(setup->color_write ? (setup->color_write->paddr +
+				       args->color_write.offset) :
+		 0));
+	rcl_u16(setup, args->width);
+	rcl_u16(setup, args->height);
+	rcl_u16(setup, args->color_write.bits);
+
 	for (y = min_y_tile; y <= max_y_tile; y++) {
 		for (x = min_x_tile; x <= max_x_tile; x++) {
 			bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 314ff71..31de5d1 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
  */
 
 #include "linux/component.h"
+#include "linux/pm_runtime.h"
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
@@ -144,18 +145,6 @@
 }
 #endif /* CONFIG_DEBUG_FS */
 
-int
-vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
-{
-	/* XXX: This interface is needed for GPU reset, and the way to
-	 * do it is to turn our power domain off and back on.  We
-	 * can't just reset from within the driver, because the reset
-	 * bits are in the power domain's register area, and get set
-	 * during the poweron process.
-	 */
-	return 0;
-}
-
 static void vc4_v3d_init_hw(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -167,6 +156,29 @@
 	V3D_WRITE(V3D_VPMBASE, 0);
 }
 
+#ifdef CONFIG_PM
+static int vc4_v3d_runtime_suspend(struct device *dev)
+{
+	struct vc4_v3d *v3d = dev_get_drvdata(dev);
+	struct vc4_dev *vc4 = v3d->vc4;
+
+	vc4_irq_uninstall(vc4->dev);
+
+	return 0;
+}
+
+static int vc4_v3d_runtime_resume(struct device *dev)
+{
+	struct vc4_v3d *v3d = dev_get_drvdata(dev);
+	struct vc4_dev *vc4 = v3d->vc4;
+
+	vc4_v3d_init_hw(vc4->dev);
+	vc4_irq_postinstall(vc4->dev);
+
+	return 0;
+}
+#endif
+
 static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -179,6 +191,8 @@
 	if (!v3d)
 		return -ENOMEM;
 
+	dev_set_drvdata(dev, v3d);
+
 	v3d->pdev = pdev;
 
 	v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -186,6 +200,7 @@
 		return PTR_ERR(v3d->regs);
 
 	vc4->v3d = v3d;
+	v3d->vc4 = vc4;
 
 	if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
 		DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -207,6 +222,8 @@
 		return ret;
 	}
 
+	pm_runtime_enable(dev);
+
 	return 0;
 }
 
@@ -216,6 +233,8 @@
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct vc4_dev *vc4 = to_vc4_dev(drm);
 
+	pm_runtime_disable(dev);
+
 	drm_irq_uninstall(drm);
 
 	/* Disable the binner's overflow memory address, so the next
@@ -228,6 +247,10 @@
 	vc4->v3d = NULL;
 }
 
+static const struct dev_pm_ops vc4_v3d_pm_ops = {
+	SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
+};
+
 static const struct component_ops vc4_v3d_ops = {
 	.bind   = vc4_v3d_bind,
 	.unbind = vc4_v3d_unbind,
@@ -255,5 +278,6 @@
 	.driver = {
 		.name = "vc4_v3d",
 		.of_match_table = vc4_v3d_dt_match,
+		.pm = &vc4_v3d_pm_ops,
 	},
 };
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6..24c2c74 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@
 	tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
 				true);
 	exec->tile_bo = &tile_bo->base;
-	if (!exec->tile_bo)
-		return -ENOMEM;
+	if (IS_ERR(exec->tile_bo))
+		return PTR_ERR(exec->tile_bo);
 	list_add_tail(&tile_bo->unref_head, &exec->unref_list);
 
 	/* tile alloc address. */
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 52f708b..d50c701 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -313,6 +313,10 @@
 		hwlock = radix_tree_deref_slot(slot);
 		if (unlikely(!hwlock))
 			continue;
+		if (radix_tree_is_indirect_ptr(hwlock)) {
+			slot = radix_tree_iter_retry(&iter);
+			continue;
+		}
 
 		if (hwlock->bank->dev->of_node == args.np) {
 			ret = 0;
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index edc29b1..833ea9d 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -213,6 +213,7 @@
 config STK8BA50
 	tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
 	depends on I2C
+	depends on IIO_TRIGGER
 	help
 	  Say yes here to get support for the Sensortek STK8BA50 3-axis
 	  accelerometer.
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 605ff42..283ded7 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -175,6 +175,7 @@
 config EXYNOS_ADC
 	tristate "Exynos ADC driver support"
 	depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
+	depends on HAS_IOMEM
 	help
 	  Core support for the ADC block found in the Samsung EXYNOS series
 	  of SoCs for drivers such as the touchscreen and hwmon to use to share
@@ -207,6 +208,7 @@
 config IMX7D_ADC
 	tristate "IMX7D ADC driver"
 	depends on ARCH_MXC || COMPILE_TEST
+	depends on HAS_IOMEM
 	help
 	  Say yes here to build support for IMX7D ADC.
 
@@ -409,6 +411,7 @@
 config VF610_ADC
 	tristate "Freescale vf610 ADC driver"
 	depends on OF
+	depends on HAS_IOMEM
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 	help
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 942320e..c1e0553 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -289,7 +289,7 @@
 		goto error_kfifo_free;
 
 	indio_dev->setup_ops = setup_ops;
-	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 
 	return 0;
 
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 43d1458..b4dde83 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -300,6 +300,7 @@
 	data->client = client;
 
 	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = id->name;
 	indio_dev->info = &mcp4725_info;
 	indio_dev->channels = &mcp4725_channel;
 	indio_dev->num_channels = 1;
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 1165b1c..cfc5a05 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -117,7 +117,7 @@
 	if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
 		return -EIO;
 
-	dht11->timestamp = ktime_get_real_ns();
+	dht11->timestamp = ktime_get_boot_ns();
 	if (hum_int < 20) {  /* DHT22 */
 		dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
 					((temp_int & 0x80) ? -100 : 100);
@@ -145,7 +145,7 @@
 
 	/* TODO: Consider making the handler safe for IRQ sharing */
 	if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
-		dht11->edges[dht11->num_edges].ts = ktime_get_real_ns();
+		dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
 		dht11->edges[dht11->num_edges++].value =
 						gpio_get_value(dht11->gpio);
 
@@ -164,7 +164,7 @@
 	int ret, timeres;
 
 	mutex_lock(&dht11->lock);
-	if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
+	if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
 		timeres = ktime_get_resolution_ns();
 		if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
 			dev_err(dht11->dev, "timeresolution %dns too low\n",
@@ -279,7 +279,7 @@
 		return -EINVAL;
 	}
 
-	dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1;
+	dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
 	dht11->num_edges = -1;
 
 	platform_set_drvdata(pdev, iio);
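
The dht11 conversion from ktime_get_real_ns() to ktime_get_boot_ns() is about which clock backs the "is the cached sample still fresh?" check: wall-clock time can jump when NTP or the user sets the date, falsely expiring or reviving cached readings, while the boottime clock only moves forward. A small userspace illustration of the two clocks, assuming a Linux system that exposes CLOCK_BOOTTIME:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec real, boot;

	clock_gettime(CLOCK_REALTIME, &real);	/* can jump back and forth */
	clock_gettime(CLOCK_BOOTTIME, &boot);	/* monotonic, counts suspend too */

	printf("realtime %lld s, boottime %lld s\n",
	       (long long)real.tv_sec, (long long)boot.tv_sec);

	/* Elapsed-time checks such as "sample younger than 2 s?" should be
	 * computed on the boottime/monotonic clock only. */
	return 0;
}
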
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 48fbc0b..8f8d137 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -5,9 +5,9 @@
 config INV_MPU6050_IIO
 	tristate "Invensense MPU6050 devices"
 	depends on I2C && SYSFS
+	depends on I2C_MUX
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
-	select I2C_MUX
 	help
 	  This driver supports the Invensense MPU6050 devices.
 	  This driver can also support MPU6500 in MPU6050 compatibility mode
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 80fbbfd..734a004 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -349,6 +349,8 @@
 
 void iio_channel_release(struct iio_channel *channel)
 {
+	if (!channel)
+		return;
 	iio_device_put(channel->indio_dev);
 	kfree(channel);
 }
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 60537ec..53201d9 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -54,7 +54,9 @@
 			.realbits	= 32,
 			.storagebits	= 32,
 		},
-		.info_mask_separate	= BIT(IIO_CHAN_INFO_RAW),
+		/* _RAW is here for backward ABI compatibility */
+		.info_mask_separate	= BIT(IIO_CHAN_INFO_RAW) |
+					  BIT(IIO_CHAN_INFO_PROCESSED),
 	},
 };
 
@@ -152,7 +154,7 @@
 	s32 temp_val;
 	int ret;
 
-	if (mask != IIO_CHAN_INFO_RAW)
+	if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
 		return -EINVAL;
 
 	/* we support only illumination (_ALI) so far. */
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 809a961..6bf89d8 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -180,7 +180,7 @@
 			{500000, 2000000}
 };
 
-static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
+static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
 					   int len, int val, int val2)
 {
 	int i, freq;
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index f5ecd6e..a0d7dee 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -117,7 +117,7 @@
 		*val = ret >> 6;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_OFFSET:
-		*val = 605;
+		*val = -605;
 		*val2 = 750000;
 		return IIO_VAL_INT_PLUS_MICRO;
 	case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 93e29fb..db35e04 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -87,7 +87,7 @@
 
 	ret = i2c_transfer(client->adapter, msg, 2);
 
-	return (ret == 2) ? 0 : ret;
+	return (ret == 2) ? 0 : -EIO;
 }
 
 static int lidar_smbus_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
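
i2c_transfer() returns the number of messages it completed, or a negative errno, so the old "return (ret == 2) ? 0 : ret;" could hand back 0 or 1, neither of which a "ret < 0" caller treats as an error; the fix folds any short transfer into -EIO. A hedged sketch of the usual checking pattern around the real i2c_transfer() API (my_read_reg() is illustrative, not the lidar driver's function):

#include <linux/i2c.h>

/* Write the register address, then read one byte back. */
static int my_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msg[2] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msg, 2);

	if (ret < 0)
		return ret;	/* bus error, already a negative errno */
	if (ret != 2)
		return -EIO;	/* partial transfer is still a failure */

	return 0;
}
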
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3de9351..14606af 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,6 @@
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr = {};
 	ssize_t ret;
-	va_list args;
 
 	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
 			   &gid_attr);
@@ -348,7 +347,6 @@
 err:
 	if (gid_attr.ndev)
 		dev_put(gid_attr.ndev);
-	va_end(args);
 	return ret;
 }
 
@@ -722,12 +720,11 @@
 
 	if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
 				&cpi, 40, sizeof(cpi)) >= 0) {
-
-		if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
+		if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
 			/* We have extended counters */
 			return &pma_group_ext;
 
-		if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
+		if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
 			/* But not the IETF ones */
 			return &pma_group_noietf;
 	}
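
The sysfs fix swaps a logical AND for a bitwise AND: "cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH" is true whenever the mask is non-zero at all, so the extended-counter attribute groups were selected regardless of the actual capability bit. A runnable demonstration of the difference (the bit value below is illustrative, not the real IB_PMA constant):

#include <stdio.h>

#define CAP_EXT_WIDTH	(1u << 9)	/* illustrative capability bit */

int main(void)
{
	unsigned int mask = 0x1;	/* some unrelated capability is set */

	/* Logical AND: "mask non-zero AND constant non-zero" is always 1 here. */
	printf("mask && CAP_EXT_WIDTH = %d\n", mask && CAP_EXT_WIDTH);

	/* Bitwise AND: tests the specific bit, which is what was intended. */
	printf("mask &  CAP_EXT_WIDTH = %d\n", (mask & CAP_EXT_WIDTH) != 0);

	return 0;
}
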
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 19837d2..2116132 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -322,6 +322,8 @@
 		      int    immediate_present,
 		      struct ib_ud_header *header)
 {
+	size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0;
+
 	grh_present = grh_present && !ip_version;
 	memset(header, 0, sizeof *header);
 
@@ -353,7 +355,8 @@
 	if (ip_version == 6 || grh_present) {
 		header->grh.ip_version      = 6;
 		header->grh.payload_length  =
-			cpu_to_be16((IB_BTH_BYTES     +
+			cpu_to_be16((udp_bytes        +
+				     IB_BTH_BYTES     +
 				     IB_DETH_BYTES    +
 				     payload_bytes    +
 				     4                + /* ICRC     */
@@ -362,8 +365,6 @@
 	}
 
 	if (ip_version == 4) {
-		int udp_bytes = udp_present ? IB_UDP_BYTES : 0;
-
 		header->ip4.ver = 4; /* version 4 */
 		header->ip4.hdr_len = 5; /* 5 words */
 		header->ip4.tot_len =
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ec737e2..03c418c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -844,6 +844,8 @@
 	int err;
 	int i;
 	size_t reqlen;
+	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
+				     max_cqe_version);
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
@@ -854,7 +856,7 @@
 	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
 	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
 		ver = 0;
-	else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+	else if (reqlen >= min_req_v2)
 		ver = 2;
 	else
 		return ERR_PTR(-EINVAL);
@@ -2214,7 +2216,9 @@
 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
 	dev->ib_dev.uverbs_ex_cmd_mask =
-		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
+		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
 	dev->ib_dev.query_port		= mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8fb9c27..34cb8e8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -270,8 +270,10 @@
 		/* fall through */
 	case IB_QPT_RC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
-			sizeof(struct mlx5_wqe_atomic_seg) +
-			sizeof(struct mlx5_wqe_raddr_seg);
+			max(sizeof(struct mlx5_wqe_atomic_seg) +
+			    sizeof(struct mlx5_wqe_raddr_seg),
+			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+			    sizeof(struct mlx5_mkey_seg));
 		break;
 
 	case IB_QPT_XRC_TGT:
@@ -279,9 +281,9 @@
 
 	case IB_QPT_UC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
-			sizeof(struct mlx5_wqe_raddr_seg) +
-			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
-			sizeof(struct mlx5_mkey_seg);
+			max(sizeof(struct mlx5_wqe_raddr_seg),
+			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+			    sizeof(struct mlx5_mkey_seg));
 		break;
 
 	case IB_QPT_UD:
@@ -1036,7 +1038,7 @@
 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq, wq, end_padding_mode,
-		 MLX5_GET64(qpc, qpc, end_padding_mode));
+		 MLX5_GET(qpc, qpc, end_padding_mode));
 	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
 	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
 	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1615,15 +1617,6 @@
 
 	if (pd) {
 		dev = to_mdev(pd->device);
-	} else {
-		/* being cautious here */
-		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
-		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
-			pr_warn("%s: no PD for transport %s\n", __func__,
-				ib_qp_type_str(init_attr->qp_type));
-			return ERR_PTR(-EINVAL);
-		}
-		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 
 		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
 			if (!pd->uobject) {
@@ -1634,6 +1627,15 @@
 				return ERR_PTR(-EINVAL);
 			}
 		}
+	} else {
+		/* being cautious here */
+		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
+		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
+			pr_warn("%s: no PD for transport %s\n", __func__,
+				ib_qp_type_str(init_attr->qp_type));
+			return ERR_PTR(-EINVAL);
+		}
+		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 	}
 
 	switch (init_attr->qp_type) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5738493..f387430 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -228,6 +228,11 @@
 
 	ocrdma_alloc_pd_pool(dev);
 
+	if (!ocrdma_alloc_stats_resources(dev)) {
+		pr_err("%s: stats resource allocation failed\n", __func__);
+		goto alloc_err;
+	}
+
 	spin_lock_init(&dev->av_tbl.lock);
 	spin_lock_init(&dev->flush_q_lock);
 	return 0;
@@ -238,6 +243,7 @@
 
 static void ocrdma_free_resources(struct ocrdma_dev *dev)
 {
+	ocrdma_release_stats_resources(dev);
 	kfree(dev->stag_arr);
 	kfree(dev->qp_tbl);
 	kfree(dev->cq_tbl);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 86c303a..255f774 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -64,10 +64,11 @@
 	return cpy_len;
 }
 
-static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
 {
 	struct stats_mem *mem = &dev->stats_mem;
 
+	mutex_init(&dev->stats_lock);
 	/* Alloc mbox command mem*/
 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
 			sizeof(struct ocrdma_rdma_stats_resp));
@@ -91,13 +92,14 @@
 	return true;
 }
 
-static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
 {
 	struct stats_mem *mem = &dev->stats_mem;
 
 	if (mem->va)
 		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
 				  mem->va, mem->pa);
+	mem->va = NULL;
 	kfree(mem->debugfs_mem);
 }
 
@@ -838,15 +840,9 @@
 				&dev->reset_stats, &ocrdma_dbg_ops))
 		goto err;
 
-	/* Now create dma_mem for stats mbx command */
-	if (!ocrdma_alloc_stats_mem(dev))
-		goto err;
-
-	mutex_init(&dev->stats_lock);
 
 	return;
 err:
-	ocrdma_release_stats_mem(dev);
 	debugfs_remove_recursive(dev->dir);
 	dev->dir = NULL;
 }
@@ -855,9 +851,7 @@
 {
 	if (!dev->dir)
 		return;
-	debugfs_remove(dev->dir);
-	mutex_destroy(&dev->stats_lock);
-	ocrdma_release_stats_mem(dev);
+	debugfs_remove_recursive(dev->dir);
 }
 
 void ocrdma_init_debugfs(void)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index c9e58d0..bba1fec 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -65,6 +65,8 @@
 
 void ocrdma_rem_debugfs(void);
 void ocrdma_init_debugfs(void);
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
 void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
 void ocrdma_add_port_stats(struct ocrdma_dev *dev);
 int ocrdma_pma_counters(struct ocrdma_dev *dev,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index d4c687b..37620b4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@
 					IB_DEVICE_SYS_IMAGE_GUID |
 					IB_DEVICE_LOCAL_DMA_LKEY |
 					IB_DEVICE_MEM_MGT_EXTENSIONS;
-	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
-	attr->max_sge_rd = 0;
+	attr->max_sge = dev->attr.max_send_sge;
+	attr->max_sge_rd = attr->max_sge;
 	attr->max_cq = dev->attr.max_cq;
 	attr->max_cqe = dev->attr.max_cqe;
 	attr->max_mr = dev->attr.max_mr;
@@ -2726,8 +2726,7 @@
 		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
 	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
 						OCRDMA_CQE_SRCQP_MASK;
-	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
-						OCRDMA_CQE_PKEY_MASK;
+	ibwc->pkey_index = 0;
 	ibwc->wc_flags = IB_WC_GRH;
 	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
 					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14..fa9c42f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
-	skb->truesize = SKB_TRUESIZE(skb->len);
-
 	++dev->stats.rx_packets;
 	dev->stats.rx_bytes += skb->len;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 050dfa1..2588931 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@
 	return status;
 }
 
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@
 	ib_sa_comp_mask comp_mask;
 	int ret = 0;
 
+	if (!priv->broadcast ||
+	    !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+		return -EINVAL;
+
 	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
 
 	rec.mgid     = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@
 			rec.join_state = 4;
 #endif
 	}
+	spin_unlock_irq(&priv->lock);
 
 	multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
 					 &rec, comp_mask, GFP_KERNEL,
 					 ipoib_mcast_join_complete, mcast);
+	spin_lock_irq(&priv->lock);
 	if (IS_ERR(multicast)) {
 		ret = PTR_ERR(multicast);
 		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
-		spin_lock_irq(&priv->lock);
 		/* Requeue this join task with a backoff delay */
 		__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
 		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
 		spin_unlock_irq(&priv->lock);
 		complete(&mcast->done);
+		spin_lock_irq(&priv->lock);
 	}
+	return 0;
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@
 				/* Found the next unjoined group */
 				init_completion(&mcast->done);
 				set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-				spin_unlock_irq(&priv->lock);
-				ipoib_mcast_join(dev, mcast);
-				spin_lock_irq(&priv->lock);
+				if (ipoib_mcast_join(dev, mcast)) {
+					spin_unlock_irq(&priv->lock);
+					return;
+				}
 			} else if (!delay_until ||
 				 time_before(mcast->delay_until, delay_until))
 				delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@
 	if (mcast) {
 		init_completion(&mcast->done);
 		set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+		ipoib_mcast_join(dev, mcast);
 	}
 	spin_unlock_irq(&priv->lock);
-	if (mcast)
-		ipoib_mcast_join(dev, mcast);
 }
 
 int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6727954..e8a84d1 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1207,7 +1207,6 @@
 #else
 static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
 static void xpad_led_disconnect(struct usb_xpad *xpad) { }
-static void xpad_identify_controller(struct usb_xpad *xpad) { }
 #endif
 
 static int xpad_start_input(struct usb_xpad *xpad)
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4d446d5..c01a1d6 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -235,7 +235,7 @@
 	unsigned short gpimapsize;
 	unsigned extend_cfg;
 	bool is_adp5585;
-	bool adp5585_support_row5;
+	bool support_row5;
 #ifdef CONFIG_GPIOLIB
 	unsigned char gpiomap[ADP5589_MAXGPIO];
 	bool export_gpio;
@@ -485,7 +485,7 @@
 	if (kpad->extend_cfg & C4_EXTEND_CFG)
 		pin_used[kpad->var->c4_extend_cfg] = true;
 
-	if (!kpad->adp5585_support_row5)
+	if (!kpad->support_row5)
 		pin_used[5] = true;
 
 	for (i = 0; i < kpad->var->maxgpio; i++)
@@ -884,12 +884,13 @@
 
 	switch (id->driver_data) {
 	case ADP5585_02:
-		kpad->adp5585_support_row5 = true;
+		kpad->support_row5 = true;
 	case ADP5585_01:
 		kpad->is_adp5585 = true;
 		kpad->var = &const_adp5585;
 		break;
 	case ADP5589:
+		kpad->support_row5 = true;
 		kpad->var = &const_adp5589;
 		break;
 	}
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 378db10..4401be2 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -304,8 +304,10 @@
 		led->cdev.brightness = LED_OFF;
 
 		error = of_property_read_u32(child, "reg", &reg);
-		if (error != 0 || reg >= num_leds)
+		if (error != 0 || reg >= num_leds) {
+			of_node_put(child);
 			return -EINVAL;
+		}
 
 		led->reg = reg;
 		led->priv = priv;
@@ -313,8 +315,10 @@
 		INIT_WORK(&led->work, cap11xx_led_work);
 
 		error = devm_led_classdev_register(dev, &led->cdev);
-		if (error)
+		if (error) {
+			of_node_put(child);
 			return error;
+		}
 
 		priv->num_leds++;
 		led++;
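
The cap11xx error paths now call of_node_put(child) before returning because breaking out of a device-tree child iteration (presumably for_each_child_of_node() here) leaves the current node's refcount elevated; only a normal loop step drops it. A hedged sketch of the pattern, with setup_one() as a hypothetical per-child helper:

#include <linux/of.h>

static int setup_one(struct device_node *child)
{
	return 0;	/* placeholder for per-child registration work */
}

static int probe_children(struct device_node *parent)
{
	struct device_node *child;
	int err;

	for_each_child_of_node(parent, child) {
		err = setup_one(child);
		if (err) {
			of_node_put(child);	/* balance the iterator's reference */
			return err;
		}
	}

	return 0;
}
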
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d6d16fa..1f2337a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -733,7 +733,7 @@
 	  module will be called xen-kbdfront.
 
 config INPUT_SIRFSOC_ONKEY
-	bool "CSR SiRFSoC power on/off/suspend key support"
+	tristate "CSR SiRFSoC power on/off/suspend key support"
 	depends on ARCH_SIRF && OF
 	default y
 	help
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 9d5b89b..ed7237f 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -101,7 +101,7 @@
 static const struct of_device_id sirfsoc_pwrc_of_match[] = {
 	{ .compatible = "sirf,prima2-pwrc" },
 	{},
-}
+};
 MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
 
 static int sirfsoc_pwrc_probe(struct platform_device *pdev)
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index e272f06..a3f0f5a 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -458,8 +458,6 @@
 	priv->abs_dev = abs_dev;
 	psmouse->private = priv;
 
-	input_set_capability(rel_dev, EV_REL, REL_WHEEL);
-
 	/* Set up and register absolute device */
 	snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
 		 psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@
 	abs_dev->id.version = psmouse->model;
 	abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
 
-	error = input_register_device(priv->abs_dev);
-	if (error)
-		goto init_fail;
-
 	/* Set absolute device capabilities */
 	input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
 	input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@
 	input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
 	input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
 
+	error = input_register_device(priv->abs_dev);
+	if (error)
+		goto init_fail;
+
+	/* Add wheel capability to the relative device */
+	input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
 	psmouse->protocol_handler = vmmouse_process_byte;
 	psmouse->disconnect = vmmouse_disconnect;
 	psmouse->reconnect = vmmouse_reconnect;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 8f82897..1ca7f55 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -134,7 +134,7 @@
 	int error;
 
 	error = device_attach(&serio->dev);
-	if (error < 0)
+	if (error < 0 && error != -EPROBE_DEFER)
 		dev_warn(&serio->dev,
 			 "device_attach() failed for %s (%s), error: %d\n",
 			 serio->phys, serio->name, error);
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index 5d4903a..69828d0 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 0b0f8c1..23fbe38 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -822,16 +822,22 @@
 	int error;
 
 	error = device_property_read_u32(dev, "threshold", &val);
-	if (!error)
-		reg_addr->reg_threshold = val;
+	if (!error) {
+		edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
+		tsdata->threshold = val;
+	}
 
 	error = device_property_read_u32(dev, "gain", &val);
-	if (!error)
-		reg_addr->reg_gain = val;
+	if (!error) {
+		edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
+		tsdata->gain = val;
+	}
 
 	error = device_property_read_u32(dev, "offset", &val);
-	if (!error)
-		reg_addr->reg_offset = val;
+	if (!error) {
+		edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+		tsdata->offset = val;
+	}
 }
 
 static void
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c..fb092f3 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1353,7 +1353,7 @@
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts =  readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
 		goto end;
 
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 5046483..d9939fa 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+	struct intel_svm_dev *sdev;
 
+	/* This might end up being called from exit_mmap(), *before* the page
+	 * tables are cleared. And __mmu_notifier_release() will delete us from
+	 * the list of notifiers so that our invalidate_range() callback doesn't
+	 * get called when the page tables are cleared. So we need to protect
+	 * against hardware accessing those page tables.
+	 *
+	 * We do it by clearing the entry in the PASID table and then flushing
+	 * the IOTLB and the PASID table caches. This might upset hardware;
+	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+	 * page) so that we end up taking a fault that the hardware really
+	 * *has* to handle gracefully without affecting other processes.
+	 */
 	svm->iommu->pasid_table[svm->pasid].val = 0;
+	wmb();
 
-	/* There's no need to do any flush because we can't get here if there
-	 * are any devices left anyway. */
-	WARN_ON(!list_empty(&svm->devs));
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdev, &svm->devs, list) {
+		intel_flush_pasid_dev(svm, sdev, svm->pasid);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+	}
+	rcu_read_unlock();
+
 }
 
 static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@
 				goto out;
 			}
 			iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
-			mm = NULL;
 		} else
 			iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
 		wmb();
@@ -442,11 +459,11 @@
 				kfree_rcu(sdev, rcu);
 
 				if (list_empty(&svm->devs)) {
-					mmu_notifier_unregister(&svm->notifier, svm->mm);
 
 					idr_remove(&svm->iommu->pasid_idr, svm->pasid);
 					if (svm->mm)
-						mmput(svm->mm);
+						mmu_notifier_unregister(&svm->notifier, svm->mm);
+
 					/* We mandate that no page faults may be outstanding
 					 * for the PASID when intel_svm_unbind_mm() is called.
 					 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@
 	struct intel_svm *svm = NULL;
 	int head, tail, handled = 0;
 
+	/* Clear PPR bit before reading head/tail registers, to
+	 * ensure that we get a new interrupt if needed. */
+	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
+
 	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
 	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
 	while (head != tail) {
@@ -551,6 +572,9 @@
 		 * any faults on kernel addresses. */
 		if (!svm->mm)
 			goto bad_req;
+		/* If the mm is already defunct, don't handle faults. */
+		if (!atomic_inc_not_zero(&svm->mm->mm_users))
+			goto bad_req;
 		down_read(&svm->mm->mmap_sem);
 		vma = find_extend_vma(svm->mm, address);
 		if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@
 		result = QI_RESP_SUCCESS;
 	invalid:
 		up_read(&svm->mm->mmap_sem);
+		mmput(svm->mm);
 	bad_req:
 		/* Accounting for major/minor faults? */
 		rcu_read_lock();
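
Before walking the faulting process's address space, the page-request handler now pins the mm and rejects the fault if the process is already exiting; the matching mmput() after up_read() drops the pin. A hedged sketch of that pin/use/unpin shape using the v4.5-era names (mmap_sem and an open-coded mm_users increment; later kernels spell these mmap_lock and mmget_not_zero()):

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch: safely inspect a possibly-exiting process's VMAs. */
static int touch_user_address(struct mm_struct *mm, unsigned long address)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	if (!atomic_inc_not_zero(&mm->mm_users))
		return -EINVAL;			/* mm is already being torn down */

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && address >= vma->vm_start)
		ret = 0;			/* ... service the fault here ... */
	up_read(&mm->mmap_sem);

	mmput(mm);				/* drop our temporary pin */

	return ret;
}
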
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba45..ac59692 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts = readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
 		goto end;
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 3447549..0a73632 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -66,7 +66,10 @@
 	unsigned long		phys_base;
 	struct its_cmd_block	*cmd_base;
 	struct its_cmd_block	*cmd_write;
-	void			*tables[GITS_BASER_NR_REGS];
+	struct {
+		void		*base;
+		u32		order;
+	} tables[GITS_BASER_NR_REGS];
 	struct its_collection	*collections;
 	struct list_head	its_device_list;
 	u64			flags;
@@ -807,9 +810,10 @@
 	int i;
 
 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
-		if (its->tables[i]) {
-			free_page((unsigned long)its->tables[i]);
-			its->tables[i] = NULL;
+		if (its->tables[i].base) {
+			free_pages((unsigned long)its->tables[i].base,
+				   its->tables[i].order);
+			its->tables[i].base = NULL;
 		}
 	}
 }
@@ -890,7 +894,8 @@
 			goto out_free;
 		}
 
-		its->tables[i] = base;
+		its->tables[i].base = base;
+		its->tables[i].order = order;
 
 retry_baser:
 		val = (virt_to_phys(base) 				 |
@@ -940,7 +945,7 @@
 			 * something is horribly wrong...
 			 */
 			free_pages((unsigned long)base, order);
-			its->tables[i] = NULL;
+			its->tables[i].base = NULL;
 
 			switch (psz) {
 			case SZ_16K:
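
The ITS tables can be allocated with a multi-page order, so tearing them down with free_page() (implicitly order 0) leaked everything past the first page; the fix stores the order next to the base pointer and frees with the same order. A hedged sketch of keeping the allocation order paired with the pointer:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct table {
	void *base;
	u32 order;
};

static int table_alloc(struct table *t, u32 order)
{
	t->base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!t->base)
		return -ENOMEM;

	t->order = order;		/* remember how much was handed out */
	return 0;
}

static void table_free(struct table *t)
{
	if (t->base) {
		free_pages((unsigned long)t->base, t->order);
		t->base = NULL;
	}
}
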
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 911758c..8f9ebf7 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -384,9 +384,6 @@
 	.irq_unmask		= gic_unmask_irq,
 	.irq_eoi		= gic_eoi_irq,
 	.irq_set_type		= gic_set_type,
-#ifdef CONFIG_SMP
-	.irq_set_affinity	= gic_set_affinity,
-#endif
 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
@@ -400,9 +397,6 @@
 	.irq_unmask		= gic_unmask_irq,
 	.irq_eoi		= gic_eoimode1_eoi_irq,
 	.irq_set_type		= gic_set_type,
-#ifdef CONFIG_SMP
-	.irq_set_affinity	= gic_set_affinity,
-#endif
 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
@@ -443,7 +437,7 @@
 	u32 bypass = 0;
 	u32 mode = 0;
 
-	if (static_key_true(&supports_deactivate))
+	if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
 		mode = GIC_CPU_CTRL_EOImodeNS;
 
 	/*
@@ -1039,6 +1033,11 @@
 		gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
 	}
 
+#ifdef CONFIG_SMP
+	if (gic_nr == 0)
+		gic->chip.irq_set_affinity = gic_set_affinity;
+#endif
+
 #ifdef CONFIG_GIC_NON_BANKED
 	if (percpu_offset) { /* Frankein-GIC without banked registers... */
 		unsigned int cpu;
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 0704362..376b280 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -22,7 +22,6 @@
 #include <linux/of_irq.h>
 
 #include <asm/exception.h>
-#include <asm/mach/irq.h>
 
 #define SUN4I_IRQ_VECTOR_REG		0x00
 #define SUN4I_IRQ_PROTECTION_REG	0x08
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb..9f6acd5 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -572,11 +572,13 @@
 		}
 	}
 
-	ret = nvm_get_sysblock(dev, &dev->sb);
-	if (!ret)
-		pr_err("nvm: device not initialized.\n");
-	else if (ret < 0)
-		pr_err("nvm: err (%d) on device initialization\n", ret);
+	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
+		ret = nvm_get_sysblock(dev, &dev->sb);
+		if (!ret)
+			pr_err("nvm: device not initialized.\n");
+		else if (ret < 0)
+			pr_err("nvm: err (%d) on device initialization\n", ret);
+	}
 
 	/* register device with a supported media manager */
 	down_write(&nvm_lock);
@@ -1055,9 +1057,11 @@
 	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
 	info.fs_ppa.ppa = -1;
 
-	ret = nvm_init_sysblock(dev, &info);
-	if (ret)
-		return ret;
+	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
+		ret = nvm_init_sysblock(dev, &info);
+		if (ret)
+			return ret;
+	}
 
 	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
 
@@ -1117,7 +1121,10 @@
 		dev->mt = NULL;
 	}
 
-	return nvm_dev_factory(dev, fact.flags);
+	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
+		return nvm_dev_factory(dev, fact.flags);
+
+	return 0;
 }
 
 static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c7595..307db1e 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -300,8 +300,10 @@
 	}
 
 	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
-	if (!page)
+	if (!page) {
+		bio_put(bio);
 		return -ENOMEM;
+	}
 
 	while ((slot = find_first_zero_bit(rblk->invalid_pages,
 					    nr_pgs_per_blk)) < nr_pgs_per_blk) {
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7..f7b3733 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -174,8 +174,7 @@
 static inline int request_intersects(struct rrpc_inflight_rq *r,
 				sector_t laddr_start, sector_t laddr_end)
 {
-	return (laddr_end >= r->l_start && laddr_end <= r->l_end) &&
-		(laddr_start >= r->l_start && laddr_start <= r->l_end);
+	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
 }
 
 static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -184,6 +183,8 @@
 	sector_t laddr_end = laddr + pages - 1;
 	struct rrpc_inflight_rq *rtmp;
 
+	WARN_ON(irqs_disabled());
+
 	spin_lock_irq(&rrpc->inflights.lock);
 	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
 		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
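
The old request_intersects() required the new request to be fully contained in an in-flight one, so two partially overlapping ranges were not detected as a conflict. The replacement is the standard closed-interval overlap test: [a_start, a_end] and [b_start, b_end] overlap exactly when a_start <= b_end and b_start <= a_end. A runnable check of the two cases:

#include <stdio.h>

/* Overlap test for closed integer intervals. */
static int intersects(long a_start, long a_end, long b_start, long b_end)
{
	return a_start <= b_end && b_start <= a_end;
}

int main(void)
{
	/* Partial overlap that the old "fully contained" test missed. */
	printf("[0,9] vs [5,14]: %d\n", intersects(0, 9, 5, 14));	/* 1 */

	/* Adjacent but disjoint ranges. */
	printf("[0,4] vs [5,9]:  %d\n", intersects(0, 4, 5, 9));	/* 0 */

	return 0;
}
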
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 546d05f..b2bbe86 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -81,6 +81,7 @@
 config MAILBOX_TEST
 	tristate "Mailbox Test Client"
 	depends on OF
+	depends on HAS_IOMEM
 	help
 	  Test client to help with testing new Controller driver
 	  implementations.
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 45d85ae..8f779a1 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -81,16 +81,10 @@
  */
 static struct mbox_chan *get_pcc_channel(int id)
 {
-	struct mbox_chan *pcc_chan;
-
 	if (id < 0 || id > pcc_mbox_ctrl.num_chans)
 		return ERR_PTR(-ENOENT);
 
-	pcc_chan = (struct mbox_chan *)
-		(unsigned long) pcc_mbox_channels +
-		(id * sizeof(*pcc_chan));
-
-	return pcc_chan;
+	return &pcc_mbox_channels[id];
 }
 
 /**
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4f22e91..d80cce4 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -210,10 +210,6 @@
 	struct block_device *bdev;
 	struct mddev *mddev = bitmap->mddev;
 	struct bitmap_storage *store = &bitmap->storage;
-	int node_offset = 0;
-
-	if (mddev_is_clustered(bitmap->mddev))
-		node_offset = bitmap->cluster_slot * store->file_pages;
 
 	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 		int size = PAGE_SIZE;
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4a8e150..685aa2d 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -170,7 +170,7 @@
 		conf->nfaults = n+1;
 }
 
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void faulty_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct faulty_conf *conf = mddev->private;
 	int failit = 0;
@@ -226,7 +226,7 @@
 	generic_make_request(bio);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void faulty_status(struct seq_file *seq, struct mddev *mddev)
 {
 	struct faulty_conf *conf = mddev->private;
 	int n;
@@ -259,7 +259,7 @@
 }
 
 
-static int reshape(struct mddev *mddev)
+static int faulty_reshape(struct mddev *mddev)
 {
 	int mode = mddev->new_layout & ModeMask;
 	int count = mddev->new_layout >> ModeShift;
@@ -299,7 +299,7 @@
 	return sectors;
 }
 
-static int run(struct mddev *mddev)
+static int faulty_run(struct mddev *mddev)
 {
 	struct md_rdev *rdev;
 	int i;
@@ -327,7 +327,7 @@
 	md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
 	mddev->private = conf;
 
-	reshape(mddev);
+	faulty_reshape(mddev);
 
 	return 0;
 }
@@ -344,11 +344,11 @@
 	.name		= "faulty",
 	.level		= LEVEL_FAULTY,
 	.owner		= THIS_MODULE,
-	.make_request	= make_request,
-	.run		= run,
+	.make_request	= faulty_make_request,
+	.run		= faulty_run,
 	.free		= faulty_free,
-	.status		= status,
-	.check_reshape	= reshape,
+	.status		= faulty_status,
+	.check_reshape	= faulty_reshape,
 	.size		= faulty_size,
 };
 
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0ded8e9..dd97d42 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -293,6 +293,7 @@
 dlm_unlock:
 		dlm_unlock_sync(bm_lockres);
 clear_bit:
+		lockres_free(bm_lockres);
 		clear_bit(slot, &cinfo->recovery_map);
 	}
 }
@@ -682,8 +683,10 @@
 		bm_lockres = lockres_init(mddev, str, NULL, 1);
 		if (!bm_lockres)
 			return -ENOMEM;
-		if (i == (cinfo->slot_number - 1))
+		if (i == (cinfo->slot_number - 1)) {
+			lockres_free(bm_lockres);
 			continue;
+		}
 
 		bm_lockres->flags |= DLM_LKF_NOQUEUE;
 		ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
@@ -858,6 +861,7 @@
 	lockres_free(cinfo->token_lockres);
 	lockres_free(cinfo->ack_lockres);
 	lockres_free(cinfo->no_new_dev_lockres);
+	lockres_free(cinfo->resync_lockres);
 	lockres_free(cinfo->bitmap_lockres);
 	unlock_all_bitmaps(mddev);
 	dlm_release_lockspace(cinfo->lockspace, 2);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c4b9134..4e3843f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1044,7 +1044,7 @@
 	kfree(plug);
 }
 
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r1conf *conf = mddev->private;
 	struct raid1_info *mirror;
@@ -1422,7 +1422,7 @@
 	wake_up(&conf->wait_barrier);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid1_status(struct seq_file *seq, struct mddev *mddev)
 {
 	struct r1conf *conf = mddev->private;
 	int i;
@@ -1439,7 +1439,7 @@
 	seq_printf(seq, "]");
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
@@ -2472,7 +2472,8 @@
  * that can be installed to exclude normal IO requests.
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+				   int *skipped)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
@@ -2890,7 +2891,7 @@
 }
 
 static void raid1_free(struct mddev *mddev, void *priv);
-static int run(struct mddev *mddev)
+static int raid1_run(struct mddev *mddev)
 {
 	struct r1conf *conf;
 	int i;
@@ -3170,15 +3171,15 @@
 	.name		= "raid1",
 	.level		= 1,
 	.owner		= THIS_MODULE,
-	.make_request	= make_request,
-	.run		= run,
+	.make_request	= raid1_make_request,
+	.run		= raid1_run,
 	.free		= raid1_free,
-	.status		= status,
-	.error_handler	= error,
+	.status		= raid1_status,
+	.error_handler	= raid1_error,
 	.hot_add_disk	= raid1_add_disk,
 	.hot_remove_disk= raid1_remove_disk,
 	.spare_active	= raid1_spare_active,
-	.sync_request	= sync_request,
+	.sync_request	= raid1_sync_request,
 	.resize		= raid1_resize,
 	.size		= raid1_size,
 	.check_reshape	= raid1_reshape,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ce959b4..1c1447d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1442,7 +1442,7 @@
 	one_write_done(r10_bio);
 }
 
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct r10conf *conf = mddev->private;
 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1484,7 +1484,7 @@
 	wake_up(&conf->wait_barrier);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid10_status(struct seq_file *seq, struct mddev *mddev)
 {
 	struct r10conf *conf = mddev->private;
 	int i;
@@ -1562,7 +1562,7 @@
 		_enough(conf, 1, ignore);
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r10conf *conf = mddev->private;
@@ -2802,7 +2802,7 @@
  *
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			     int *skipped)
 {
 	struct r10conf *conf = mddev->private;
@@ -3523,7 +3523,7 @@
 	return ERR_PTR(err);
 }
 
-static int run(struct mddev *mddev)
+static int raid10_run(struct mddev *mddev)
 {
 	struct r10conf *conf;
 	int i, disk_idx, chunk_size;
@@ -4617,15 +4617,15 @@
 	.name		= "raid10",
 	.level		= 10,
 	.owner		= THIS_MODULE,
-	.make_request	= make_request,
-	.run		= run,
+	.make_request	= raid10_make_request,
+	.run		= raid10_run,
 	.free		= raid10_free,
-	.status		= status,
-	.error_handler	= error,
+	.status		= raid10_status,
+	.error_handler	= raid10_error,
 	.hot_add_disk	= raid10_add_disk,
 	.hot_remove_disk= raid10_remove_disk,
 	.spare_active	= raid10_spare_active,
-	.sync_request	= sync_request,
+	.sync_request	= raid10_sync_request,
 	.quiesce	= raid10_quiesce,
 	.size		= raid10_size,
 	.resize		= raid10_resize,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a086014..b4f02c9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2496,7 +2496,7 @@
 	dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r5conf *conf = mddev->private;
@@ -2958,7 +2958,7 @@
 	 * If several bio share a stripe. The bio bi_phys_segments acts as a
 	 * reference count to avoid race. The reference count should already be
 	 * increased before this function is called (for example, in
-	 * make_request()), so other bio sharing this stripe will not free the
+	 * raid5_make_request()), so other bio sharing this stripe will not free the
 	 * stripe. If a stripe is owned by one stripe, the stripe lock will
 	 * protect it.
 	 */
@@ -5135,7 +5135,7 @@
 	}
 }
 
-static void make_request(struct mddev *mddev, struct bio * bi)
+static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
 	struct r5conf *conf = mddev->private;
 	int dd_idx;
@@ -5225,7 +5225,7 @@
 		new_sector = raid5_compute_sector(conf, logical_sector,
 						  previous,
 						  &dd_idx, NULL);
-		pr_debug("raid456: make_request, sector %llu logical %llu\n",
+		pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
 			(unsigned long long)new_sector,
 			(unsigned long long)logical_sector);
 
@@ -5575,7 +5575,8 @@
 	return retn;
 }
 
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
+					  int *skipped)
 {
 	struct r5conf *conf = mddev->private;
 	struct stripe_head *sh;
@@ -6674,7 +6675,7 @@
 	return 0;
 }
 
-static int run(struct mddev *mddev)
+static int raid5_run(struct mddev *mddev)
 {
 	struct r5conf *conf;
 	int working_disks = 0;
@@ -7048,7 +7049,7 @@
 	mddev->to_remove = &raid5_attrs_group;
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid5_status(struct seq_file *seq, struct mddev *mddev)
 {
 	struct r5conf *conf = mddev->private;
 	int i;
@@ -7864,15 +7865,15 @@
 	.name		= "raid6",
 	.level		= 6,
 	.owner		= THIS_MODULE,
-	.make_request	= make_request,
-	.run		= run,
+	.make_request	= raid5_make_request,
+	.run		= raid5_run,
 	.free		= raid5_free,
-	.status		= status,
-	.error_handler	= error,
+	.status		= raid5_status,
+	.error_handler	= raid5_error,
 	.hot_add_disk	= raid5_add_disk,
 	.hot_remove_disk= raid5_remove_disk,
 	.spare_active	= raid5_spare_active,
-	.sync_request	= sync_request,
+	.sync_request	= raid5_sync_request,
 	.resize		= raid5_resize,
 	.size		= raid5_size,
 	.check_reshape	= raid6_check_reshape,
@@ -7887,15 +7888,15 @@
 	.name		= "raid5",
 	.level		= 5,
 	.owner		= THIS_MODULE,
-	.make_request	= make_request,
-	.run		= run,
+	.make_request	= raid5_make_request,
+	.run		= raid5_run,
 	.free		= raid5_free,
-	.status		= status,
-	.error_handler	= error,
+	.status		= raid5_status,
+	.error_handler	= raid5_error,
 	.hot_add_disk	= raid5_add_disk,
 	.hot_remove_disk= raid5_remove_disk,
 	.spare_active	= raid5_spare_active,
-	.sync_request	= sync_request,
+	.sync_request	= raid5_sync_request,
 	.resize		= raid5_resize,
 	.size		= raid5_size,
 	.check_reshape	= raid5_check_reshape,
@@ -7911,15 +7912,15 @@
 	.name		= "raid4",
 	.level		= 4,
 	.owner		= THIS_MODULE,
-	.make_request	= make_request,
-	.run		= run,
+	.make_request	= raid5_make_request,
+	.run		= raid5_run,
 	.free		= raid5_free,
-	.status		= status,
-	.error_handler	= error,
+	.status		= raid5_status,
+	.error_handler	= raid5_error,
 	.hot_add_disk	= raid5_add_disk,
 	.hot_remove_disk= raid5_remove_disk,
 	.spare_active	= raid5_spare_active,
-	.sync_request	= sync_request,
+	.sync_request	= raid5_sync_request,
 	.resize		= raid5_resize,
 	.size		= raid5_size,
 	.check_reshape	= raid5_check_reshape,
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 0e209b5..c6abeb4 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -903,9 +903,18 @@
 {
 	struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
 	struct tda1004x_state* state = fe->demodulator_priv;
+	int status;
 
 	dprintk("%s\n", __func__);
 
+	status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
+	if (status == -1)
+		return -EIO;
+
+	/* Only update the properties cache if device is locked */
+	if (!(status & 8))
+		return 0;
+
 	// inversion status
 	fe_params->inversion = INVERSION_OFF;
 	if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 8304919..bf82726 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -478,7 +478,6 @@
 	{ "ir_rx_z8f0811_hdpvr", 0 },
 	{ }
 };
-MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
 
 static struct i2c_driver ir_kbd_driver = {
 	.driver = {
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index b9e43ff..cbe4711 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -144,8 +144,7 @@
 	mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
 	if (mf) {
 		mutex_lock(&sensor->lock);
-		if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
-			*mf = fmt->format;
+		*mf = fmt->format;
 		mutex_unlock(&sensor->lock);
 	}
 	return 0;
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 1d2c310..94f8162 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1211,6 +1211,8 @@
 
 static int alsa_device_exit(struct saa7134_dev *dev)
 {
+	if (!snd_saa7134_cards[dev->nr])
+		return 1;
 
 	snd_card_free(snd_saa7134_cards[dev->nr]);
 	snd_saa7134_cards[dev->nr] = NULL;
@@ -1260,7 +1262,8 @@
 	int idx;
 
 	for (idx = 0; idx < SNDRV_CARDS; idx++) {
-		snd_card_free(snd_saa7134_cards[idx]);
+		if (snd_saa7134_cards[idx])
+			snd_card_free(snd_saa7134_cards[idx]);
 	}
 
 	saa7134_dmasound_init = NULL;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 5263594..8b89ebe1 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -215,6 +215,7 @@
 config VIDEO_STI_BDISP
 	tristate "STMicroelectronics BDISP 2D blitter driver"
 	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on HAS_DMA
 	depends on ARCH_STI || COMPILE_TEST
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 40423c6..57d42c6 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -1,6 +1,6 @@
 
 config VIDEO_SAMSUNG_EXYNOS4_IS
-	bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
+	tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
 	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
 	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
 	depends on OF && COMMON_CLK
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 49658ca..979c388 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -631,6 +631,12 @@
 
 	fimc_is_mem_barrier();
 
+	/*
+	 * Some user space use cases hang up here without this
+	 * empirically chosen delay.
+	 */
+	udelay(100);
+
 	mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
 	mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
 	mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index bf9261e..c081672 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -218,8 +218,8 @@
 							ivb->dma_addr[i];
 
 			isp_dbg(2, &video->ve.vdev,
-				"dma_buf %pad (%d/%d/%d) addr: %pad\n",
-				&buf_index, ivb->index, i, vb->index,
+				"dma_buf %d (%d/%d/%d) addr: %pad\n",
+				buf_index, ivb->index, i, vb->index,
 				&ivb->dma_addr[i]);
 		}
 
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index f3b2dd3..e79ddbb 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -186,32 +186,20 @@
 }
 
 /**
- * __fimc_pipeline_open - update the pipeline information, enable power
- *                        of all pipeline subdevs and the sensor clock
- * @me: media entity to start graph walk with
- * @prepare: true to walk the current pipeline and acquire all subdevs
+ * __fimc_pipeline_enable - enable power of all pipeline subdevs
+ *			    and the sensor clock
+ * @ep: video pipeline structure
+ * @fmd: fimc media device
  *
  * Called with the graph mutex held.
  */
-static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
-				struct media_entity *me, bool prepare)
+static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
+				  struct fimc_md *fmd)
 {
-	struct fimc_md *fmd = entity_to_fimc_mdev(me);
 	struct fimc_pipeline *p = to_fimc_pipeline(ep);
-	struct v4l2_subdev *sd;
 	int ret;
 
-	if (WARN_ON(p == NULL || me == NULL))
-		return -EINVAL;
-
-	if (prepare)
-		fimc_pipeline_prepare(p, me);
-
-	sd = p->subdevs[IDX_SENSOR];
-	if (sd == NULL)
-		return -EINVAL;
-
-	/* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
+	/* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
 	if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
 		ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
 		if (ret < 0)
@@ -229,6 +217,40 @@
 }
 
 /**
+ * __fimc_pipeline_open - update the pipeline information, enable power
+ *                        of all pipeline subdevs and the sensor clock
+ * @me: media entity to start graph walk with
+ * @prepare: true to walk the current pipeline and acquire all subdevs
+ *
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
+				struct media_entity *me, bool prepare)
+{
+	struct fimc_md *fmd = entity_to_fimc_mdev(me);
+	struct fimc_pipeline *p = to_fimc_pipeline(ep);
+	struct v4l2_subdev *sd;
+
+	if (WARN_ON(p == NULL || me == NULL))
+		return -EINVAL;
+
+	if (prepare)
+		fimc_pipeline_prepare(p, me);
+
+	sd = p->subdevs[IDX_SENSOR];
+	if (sd == NULL) {
+		pr_warn("%s(): No sensor subdev\n", __func__);
+		/*
+		 * Pipeline open must not fail here, so that user space
+		 * can still configure the pipeline.
+		 */
+		return 0;
+	}
+
+	return __fimc_pipeline_enable(ep, fmd);
+}
+
+/**
  * __fimc_pipeline_close - disable the sensor clock and pipeline power
  * @fimc: fimc device terminating the pipeline
  *
@@ -269,10 +291,43 @@
 		{ IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
 	};
 	struct fimc_pipeline *p = to_fimc_pipeline(ep);
+	struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity);
+	enum fimc_subdev_index sd_id;
 	int i, ret = 0;
 
-	if (p->subdevs[IDX_SENSOR] == NULL)
-		return -ENODEV;
+	if (p->subdevs[IDX_SENSOR] == NULL) {
+		if (!fmd->user_subdev_api) {
+			/*
+			 * Sensor must be already discovered if we
+			 * aren't in the user_subdev_api mode
+			 */
+			return -ENODEV;
+		}
+
+		/* Get pipeline sink entity */
+		if (p->subdevs[IDX_FIMC])
+			sd_id = IDX_FIMC;
+		else if (p->subdevs[IDX_IS_ISP])
+			sd_id = IDX_IS_ISP;
+		else if (p->subdevs[IDX_FLITE])
+			sd_id = IDX_FLITE;
+		else
+			return -ENODEV;
+
+		/*
+		 * Sensor could have been linked between open and STREAMON -
+		 * check if this is the case.
+		 */
+		fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity);
+
+		if (p->subdevs[IDX_SENSOR] == NULL)
+			return -ENODEV;
+
+		ret = __fimc_pipeline_enable(ep, fmd);
+		if (ret < 0)
+			return ret;
+
+	}
 
 	for (i = 0; i < IDX_MAX; i++) {
 		unsigned int idx = seq[on][i];
@@ -282,8 +337,10 @@
 		if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
 			goto error;
 	}
+
 	return 0;
 error:
+	fimc_pipeline_s_power(p, !on);
 	for (; i >= 0; i--) {
 		unsigned int idx = seq[on][i];
 		v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index c398b28..1af779e 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -795,7 +795,7 @@
 			xlate->host_fmt	= &isi_camera_formats[i];
 			xlate->code	= code.code;
 			dev_dbg(icd->parent, "Providing format %s using code %d\n",
-				isi_camera_formats[0].name, code.code);
+				xlate->host_fmt->name, xlate->code);
 		}
 		break;
 	default:
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index cc84c6d..46c7186 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -1493,6 +1493,8 @@
 					struct soc_camera_async_client, notifier);
 	struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev);
 
+	icd->control = NULL;
+
 	if (icd->clk) {
 		v4l2_clk_unregister(icd->clk);
 		icd->clk = NULL;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 42dff9d..533bc79 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -256,7 +256,7 @@
 
 	/* Create links. */
 	list_for_each_entry(entity, &vsp1->entities, list_dev) {
-		if (entity->type == VSP1_ENTITY_LIF) {
+		if (entity->type == VSP1_ENTITY_WPF) {
 			ret = vsp1_wpf_create_links(vsp1, entity);
 			if (ret < 0)
 				goto done;
@@ -264,7 +264,10 @@
 			ret = vsp1_rpf_create_links(vsp1, entity);
 			if (ret < 0)
 				goto done;
-		} else {
+		}
+
+		if (entity->type != VSP1_ENTITY_LIF &&
+		    entity->type != VSP1_ENTITY_RPF) {
 			ret = vsp1_create_links(vsp1, entity);
 			if (ret < 0)
 				goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 637d0d6..b4dca57 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -515,7 +515,7 @@
 	bool stopped;
 
 	spin_lock_irqsave(&pipe->irqlock, flags);
-	stopped = pipe->state == VSP1_PIPELINE_STOPPED,
+	stopped = pipe->state == VSP1_PIPELINE_STOPPED;
 	spin_unlock_irqrestore(&pipe->irqlock, flags);
 
 	return stopped;
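
The vsp1_video change swaps a stray comma for a semicolon. In this particular spot the comma was likely harmless, since assignment binds tighter than the comma operator, but the pattern is worth fixing on sight; a tiny demonstration of how the same comma silently changes a result in another placement:

#include <stdio.h>

int main(void)
{
	int a = 1, b = 2, x, y;

	x = a, b;        /* parses as (x = a), b;  -> x == 1, b unused */
	y = (a, b);      /* comma expression       -> y == 2           */

	printf("x=%d y=%d\n", x, y);
	return 0;
}
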
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index c5d49d7..ff8953a 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1063,8 +1063,11 @@
  */
 static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
 {
-	int ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
-			vb, pb, vb->planes);
+	int ret = 0;
+
+	if (pb)
+		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+				 vb, pb, vb->planes);
 	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
 }
 
@@ -1077,14 +1080,16 @@
 	struct vb2_queue *q = vb->vb2_queue;
 	void *mem_priv;
 	unsigned int plane;
-	int ret;
+	int ret = 0;
 	enum dma_data_direction dma_dir =
 		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	bool reacquired = vb->planes[0].mem_priv == NULL;
 
 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
 	/* Copy relevant information provided by the userspace */
-	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+	if (pb)
+		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+				 vb, pb, planes);
 	if (ret)
 		return ret;
 
@@ -1192,14 +1197,16 @@
 	struct vb2_queue *q = vb->vb2_queue;
 	void *mem_priv;
 	unsigned int plane;
-	int ret;
+	int ret = 0;
 	enum dma_data_direction dma_dir =
 		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	bool reacquired = vb->planes[0].mem_priv == NULL;
 
 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
 	/* Copy relevant information provided by the userspace */
-	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+	if (pb)
+		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+				 vb, pb, planes);
 	if (ret)
 		return ret;
 
@@ -1520,7 +1527,8 @@
 	q->waiting_for_buffers = false;
 	vb->state = VB2_BUF_STATE_QUEUED;
 
-	call_void_bufop(q, copy_timestamp, vb, pb);
+	if (pb)
+		call_void_bufop(q, copy_timestamp, vb, pb);
 
 	trace_vb2_qbuf(q, vb);
 
@@ -1532,7 +1540,8 @@
 		__enqueue_in_driver(vb);
 
 	/* Fill buffer information for the userspace */
-	call_void_bufop(q, fill_user_buffer, vb, pb);
+	if (pb)
+		call_void_bufop(q, fill_user_buffer, vb, pb);
 
 	/*
 	 * If streamon has been called, and we haven't yet called
@@ -1731,7 +1740,8 @@
  * The return values from this function are intended to be directly returned
  * from vidioc_dqbuf handler in driver.
  */
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+		   bool nonblocking)
 {
 	struct vb2_buffer *vb = NULL;
 	int ret;
@@ -1754,8 +1764,12 @@
 
 	call_void_vb_qop(vb, buf_finish, vb);
 
+	if (pindex)
+		*pindex = vb->index;
+
 	/* Fill buffer information for the userspace */
-	call_void_bufop(q, fill_user_buffer, vb, pb);
+	if (pb)
+		call_void_bufop(q, fill_user_buffer, vb, pb);
 
 	/* Remove from videobuf queue */
 	list_del(&vb->queued_entry);
@@ -1828,7 +1842,7 @@
 	 * that's done in dqbuf, but that's not going to happen when we
 	 * cancel the whole queue. Note: this code belongs here, not in
 	 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
-	 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
+	 * call to __fill_user_buffer() after buf_finish(). That order can't
 	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
 	 */
 	for (i = 0; i < q->num_buffers; ++i) {
@@ -2357,7 +2371,6 @@
 	unsigned int count;
 	unsigned int type;
 	unsigned int memory;
-	struct vb2_buffer *b;
 	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
 	unsigned int cur_index;
 	unsigned int initial_index;
@@ -2410,12 +2423,6 @@
 	if (fileio == NULL)
 		return -ENOMEM;
 
-	fileio->b = kzalloc(q->buf_struct_size, GFP_KERNEL);
-	if (fileio->b == NULL) {
-		kfree(fileio);
-		return -ENOMEM;
-	}
-
 	fileio->read_once = q->fileio_read_once;
 	fileio->write_immediately = q->fileio_write_immediately;
 
@@ -2460,13 +2467,7 @@
 		 * Queue all buffers.
 		 */
 		for (i = 0; i < q->num_buffers; i++) {
-			struct vb2_buffer *b = fileio->b;
-
-			memset(b, 0, q->buf_struct_size);
-			b->type = q->type;
-			b->memory = q->memory;
-			b->index = i;
-			ret = vb2_core_qbuf(q, i, b);
+			ret = vb2_core_qbuf(q, i, NULL);
 			if (ret)
 				goto err_reqbufs;
 			fileio->bufs[i].queued = 1;
@@ -2511,7 +2512,6 @@
 		q->fileio = NULL;
 		fileio->count = 0;
 		vb2_core_reqbufs(q, fileio->memory, &fileio->count);
-		kfree(fileio->b);
 		kfree(fileio);
 		dprintk(3, "file io emulator closed\n");
 	}
@@ -2539,7 +2539,8 @@
 	 * else is able to provide this information with the write() operation.
 	 */
 	bool copy_timestamp = !read && q->copy_timestamp;
-	int ret, index;
+	unsigned index;
+	int ret;
 
 	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
 		read ? "read" : "write", (long)*ppos, count,
@@ -2564,22 +2565,20 @@
 	 */
 	index = fileio->cur_index;
 	if (index >= q->num_buffers) {
-		struct vb2_buffer *b = fileio->b;
+		struct vb2_buffer *b;
 
 		/*
 		 * Call vb2_dqbuf to get buffer back.
 		 */
-		memset(b, 0, q->buf_struct_size);
-		b->type = q->type;
-		b->memory = q->memory;
-		ret = vb2_core_dqbuf(q, b, nonblock);
+		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
 		dprintk(5, "vb2_dqbuf result: %d\n", ret);
 		if (ret)
 			return ret;
 		fileio->dq_count += 1;
 
-		fileio->cur_index = index = b->index;
+		fileio->cur_index = index;
 		buf = &fileio->bufs[index];
+		b = q->bufs[index];
 
 		/*
 		 * Get number of bytes filled by the driver
@@ -2630,7 +2629,7 @@
 	 * Queue next buffer if required.
 	 */
 	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
-		struct vb2_buffer *b = fileio->b;
+		struct vb2_buffer *b = q->bufs[index];
 
 		/*
 		 * Check if this is the last buffer to read.
@@ -2643,15 +2642,11 @@
 		/*
 		 * Call vb2_qbuf and give buffer to the driver.
 		 */
-		memset(b, 0, q->buf_struct_size);
-		b->type = q->type;
-		b->memory = q->memory;
-		b->index = index;
 		b->planes[0].bytesused = buf->pos;
 
 		if (copy_timestamp)
 			b->timestamp = ktime_get_ns();
-		ret = vb2_core_qbuf(q, index, b);
+		ret = vb2_core_qbuf(q, index, NULL);
 		dprintk(5, "vb2_dbuf result: %d\n", ret);
 		if (ret)
 			return ret;
@@ -2713,10 +2708,9 @@
 {
 	struct vb2_queue *q = data;
 	struct vb2_threadio_data *threadio = q->threadio;
-	struct vb2_fileio_data *fileio = q->fileio;
 	bool copy_timestamp = false;
-	int prequeue = 0;
-	int index = 0;
+	unsigned prequeue = 0;
+	unsigned index = 0;
 	int ret = 0;
 
 	if (q->is_output) {
@@ -2728,37 +2722,34 @@
 
 	for (;;) {
 		struct vb2_buffer *vb;
-		struct vb2_buffer *b = fileio->b;
 
 		/*
 		 * Call vb2_dqbuf to get buffer back.
 		 */
-		memset(b, 0, q->buf_struct_size);
-		b->type = q->type;
-		b->memory = q->memory;
 		if (prequeue) {
-			b->index = index++;
+			vb = q->bufs[index++];
 			prequeue--;
 		} else {
 			call_void_qop(q, wait_finish, q);
 			if (!threadio->stop)
-				ret = vb2_core_dqbuf(q, b, 0);
+				ret = vb2_core_dqbuf(q, &index, NULL, 0);
 			call_void_qop(q, wait_prepare, q);
 			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+			if (!ret)
+				vb = q->bufs[index];
 		}
 		if (ret || threadio->stop)
 			break;
 		try_to_freeze();
 
-		vb = q->bufs[b->index];
-		if (b->state == VB2_BUF_STATE_DONE)
+		if (vb->state != VB2_BUF_STATE_ERROR)
 			if (threadio->fnc(vb, threadio->priv))
 				break;
 		call_void_qop(q, wait_finish, q);
 		if (copy_timestamp)
-			b->timestamp = ktime_get_ns();;
+			vb->timestamp = ktime_get_ns();
 		if (!threadio->stop)
-			ret = vb2_core_qbuf(q, b->index, b);
+			ret = vb2_core_qbuf(q, vb->index, NULL);
 		call_void_qop(q, wait_prepare, q);
 		if (ret || threadio->stop)
 			break;
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index c9a2860..91f5521 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -625,7 +625,7 @@
 		return -EINVAL;
 	}
 
-	ret = vb2_core_dqbuf(q, b, nonblocking);
+	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
 
 	return ret;
 }
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e6e4bac..12099b0 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2048,6 +2048,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
 
 static int config_hot_period(u16 val)
 {
@@ -2074,11 +2075,13 @@
 
 	return config_hot_period(cycles32k);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
 
 int db8500_prcmu_stop_temp_sense(void)
 {
 	return config_hot_period(0xFFFF);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
 
 static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
 {
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 677d0362..80f9afc 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -458,7 +458,11 @@
 {
 	struct mei_cl *cl = file->private_data;
 
-	return mei_cl_notify_request(cl, file, request);
+	if (request != MEI_HBM_NOTIFICATION_START &&
+	    request != MEI_HBM_NOTIFICATION_STOP)
+		return -EINVAL;
+
+	return mei_cl_notify_request(cl, file, (u8)request);
 }
 
 /**
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 5914263..fe207e5 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -47,13 +47,10 @@
 #include "queue.h"
 
 MODULE_ALIAS("mmc:block");
-
-#ifdef KERNEL
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
-#endif
 
 #define INAND_CMD38_ARG_EXT_CSD  113
 #define INAND_CMD38_ARG_ERASE    0x00
@@ -655,8 +652,10 @@
 	}
 
 	md = mmc_blk_get(bdev->bd_disk);
-	if (!md)
+	if (!md) {
+		err = -EINVAL;
 		goto cmd_err;
+	}
 
 	card = md->queue.card;
 	if (IS_ERR(card)) {
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1c1b45e..3446097 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -925,6 +925,10 @@
 
 			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
 						PAGE_SIZE, dir);
+			if (dma_mapping_error(dma_dev, dma_addr)) {
+				data->error = -EFAULT;
+				break;
+			}
 			if (direction == DMA_TO_DEVICE)
 				t->tx_dma = dma_addr + sg->offset;
 			else
@@ -1393,10 +1397,12 @@
 		host->dma_dev = dev;
 		host->ones_dma = dma_map_single(dev, ones,
 				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, host->ones_dma))
+			goto fail_ones_dma;
 		host->data_dma = dma_map_single(dev, host->data,
 				sizeof(*host->data), DMA_BIDIRECTIONAL);
-
-		/* REVISIT in theory those map operations can fail... */
+		if (dma_mapping_error(dev, host->data_dma))
+			goto fail_data_dma;
 
 		dma_sync_single_for_cpu(host->dma_dev,
 				host->data_dma, sizeof(*host->data),
@@ -1462,6 +1468,11 @@
 	if (host->dma_dev)
 		dma_unmap_single(host->dma_dev, host->data_dma,
 				sizeof(*host->data), DMA_BIDIRECTIONAL);
+fail_data_dma:
+	if (host->dma_dev)
+		dma_unmap_single(host->dma_dev, host->ones_dma,
+				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+fail_ones_dma:
 	kfree(host->data);
 
 fail_nobuf1:
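
The mmc_spi fix adds dma_mapping_error() checks and a pair of unwind labels so that a failed second mapping releases the first one. A plain user-space sketch of the same acquire-in-order / release-in-reverse goto ladder (malloc stands in for the DMA mappings; names are made up):

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_second)
{
	void *ones_map, *data_map;

	ones_map = malloc(512);                       /* dma_map_single(ones) stand-in */
	if (!ones_map)
		goto fail_ones;

	data_map = fail_second ? NULL : malloc(512);  /* dma_map_single(data) stand-in */
	if (!data_map)
		goto fail_data;

	free(data_map);
	free(ones_map);
	return 0;

fail_data:
	free(ones_map);                               /* undo the first mapping */
fail_ones:
	return -1;
}

int main(void)
{
	printf("ok=%d forced-failure=%d\n", setup(0), setup(1));
	return 0;
}
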
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ce08896..da82477 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -86,7 +86,7 @@
 static inline void pxamci_init_ocr(struct pxamci_host *host)
 {
 #ifdef CONFIG_REGULATOR
-	host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");
+	host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
 
 	if (IS_ERR(host->vcc))
 		host->vcc = NULL;
@@ -654,12 +654,8 @@
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!r || irq < 0)
-		return -ENXIO;
-
-	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
-	if (!r)
-		return -EBUSY;
+	if (irq < 0)
+		return irq;
 
 	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
 	if (!mmc) {
@@ -695,7 +691,7 @@
 	host->pdata = pdev->dev.platform_data;
 	host->clkrt = CLKRT_OFF;
 
-	host->clk = clk_get(&pdev->dev, NULL);
+	host->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(host->clk)) {
 		ret = PTR_ERR(host->clk);
 		host->clk = NULL;
@@ -727,9 +723,9 @@
 	host->irq = irq;
 	host->imask = MMC_I_MASK_ALL;
 
-	host->base = ioremap(r->start, SZ_4K);
-	if (!host->base) {
-		ret = -ENOMEM;
+	host->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(host->base)) {
+		ret = PTR_ERR(host->base);
 		goto out;
 	}
 
@@ -742,7 +738,8 @@
 	writel(64, host->base + MMC_RESTO);
 	writel(host->imask, host->base + MMC_I_MASK);
 
-	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
+	ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
+			       DRIVER_NAME, host);
 	if (ret)
 		goto out;
 
@@ -804,7 +801,7 @@
 		dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
 		goto out;
 	} else {
-		mmc->caps |= host->pdata->gpio_card_ro_invert ?
+		mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
 			0 : MMC_CAP2_RO_ACTIVE_HIGH;
 	}
 
@@ -833,14 +830,9 @@
 			dma_release_channel(host->dma_chan_rx);
 		if (host->dma_chan_tx)
 			dma_release_channel(host->dma_chan_tx);
-		if (host->base)
-			iounmap(host->base);
-		if (host->clk)
-			clk_put(host->clk);
 	}
 	if (mmc)
 		mmc_free_host(mmc);
-	release_resource(r);
 	return ret;
 }
 
@@ -859,9 +851,6 @@
 			gpio_ro = host->pdata->gpio_card_ro;
 			gpio_power = host->pdata->gpio_power;
 		}
-		if (host->vcc)
-			regulator_put(host->vcc);
-
 		if (host->pdata && host->pdata->exit)
 			host->pdata->exit(&pdev->dev, mmc);
 
@@ -870,16 +859,10 @@
 		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
 		       host->base + MMC_I_MASK);
 
-		free_irq(host->irq, host);
 		dmaengine_terminate_all(host->dma_chan_rx);
 		dmaengine_terminate_all(host->dma_chan_tx);
 		dma_release_channel(host->dma_chan_rx);
 		dma_release_channel(host->dma_chan_tx);
-		iounmap(host->base);
-
-		clk_put(host->clk);
-
-		release_resource(host->res);
 
 		mmc_free_host(mmc);
 	}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index f6047fc..a5cda92 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -146,6 +146,33 @@
 	.ops = &sdhci_acpi_ops_int,
 };
 
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+	int gpio_cd = mmc_gpio_get_cd(mmc);
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!gpio_cd)
+		return 0;
+
+	pm_runtime_get_sync(mmc->parent);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		goto out;
+
+	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	pm_runtime_mark_last_busy(mmc->parent);
+	pm_runtime_put_autosuspend(mmc->parent);
+
+	return ret;
+}
+
 static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
 				      const char *hid, const char *uid)
 {
@@ -196,6 +223,9 @@
 
 	/* Platform specific code during sd probe slot goes here */
 
+	if (hid && !strcmp(hid, "80865ACA"))
+		host->mmc_host_ops.get_cd = bxt_get_cd;
+
 	return 0;
 }
 
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7e7d8f0..9cb86fb 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -217,6 +217,7 @@
 pm_runtime_disable:
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
 clocks_disable_unprepare:
 	clk_disable_unprepare(priv->gck);
 	clk_disable_unprepare(priv->mainck);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index cc851b0..df3b8ec 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -330,6 +330,33 @@
 	sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
 }
 
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+	int gpio_cd = mmc_gpio_get_cd(mmc);
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!gpio_cd)
+		return 0;
+
+	pm_runtime_get_sync(mmc->parent);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		goto out;
+
+	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	pm_runtime_mark_last_busy(mmc->parent);
+	pm_runtime_put_autosuspend(mmc->parent);
+
+	return ret;
+}
+
 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -362,6 +389,10 @@
 	slot->cd_con_id = NULL;
 	slot->cd_idx = 0;
 	slot->cd_override_level = true;
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+
 	return 0;
 }
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d622435..add9fdf 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1360,7 +1360,7 @@
 	sdhci_runtime_pm_get(host);
 
 	/* Firstly check card presence */
-	present = sdhci_do_get_cd(host);
+	present = mmc->ops->get_cd(mmc);
 
 	spin_lock_irqsave(&host->lock, flags);
 
@@ -2849,6 +2849,8 @@
 
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
+	host->mmc_host_ops = sdhci_ops;
+	mmc->ops = &host->mmc_host_ops;
 
 	return host;
 }
@@ -3037,7 +3039,6 @@
 	/*
 	 * Set host parameters.
 	 */
-	mmc->ops = &sdhci_ops;
 	max_clk = host->max_clk;
 
 	if (host->ops->get_min_clock)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7654ae5..0115e99 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -430,6 +430,7 @@
 
 	/* Internal data */
 	struct mmc_host *mmc;	/* MMC structure */
+	struct mmc_host_ops mmc_host_ops;	/* MMC host ops */
 	u64 dma_mask;		/* custom DMA mask */
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 1ca8a13..6234eab3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -445,7 +445,7 @@
 							pdata->slave_id_rx);
 	} else {
 		host->chan_tx = dma_request_slave_channel(dev, "tx");
-		host->chan_tx = dma_request_slave_channel(dev, "rx");
+		host->chan_rx = dma_request_slave_channel(dev, "rx");
 	}
 	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
 		host->chan_rx);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 49eea89..3010080 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,14 @@
 	return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+	/* Check if we will never have enough descriptors,
+	 * as gso_segs can be more than current ring size
+	 */
+	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@
 		 * vlan encapsulated.
 		 */
 		if (skb->protocol == htons(ETH_P_8021Q) ||
-		    skb->protocol == htons(ETH_P_8021AD))
-			return tg3_tso_bug(tp, tnapi, txq, skb);
+		    skb->protocol == htons(ETH_P_8021AD)) {
+			if (tg3_tso_bug_gso_check(tnapi, skb))
+				return tg3_tso_bug(tp, tnapi, txq, skb);
+			goto drop;
+		}
 
 		if (!skb_is_gso_v6(skb)) {
 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-			    tg3_flag(tp, TSO_BUG))
-				return tg3_tso_bug(tp, tnapi, txq, skb);
-
+			    tg3_flag(tp, TSO_BUG)) {
+				if (tg3_tso_bug_gso_check(tnapi, skb))
+					return tg3_tso_bug(tp, tnapi, txq, skb);
+				goto drop;
+			}
 			ip_csum = iph->check;
 			ip_tot_len = iph->tot_len;
 			iph->check = 0;
@@ -8073,7 +8086,7 @@
 	if (would_hit_hwbug) {
 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-		if (mss) {
+		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
 			/* If it's a TSO packet, do GSO instead of
 			 * allocating and copying to a large linear SKB
 			 */
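
tg3_tso_bug_gso_check() makes the software-GSO workaround conditional on the segment count ever fitting the TX ring; otherwise the packet is dropped instead of being deferred forever. The sketch below mirrors the gso_segs < tx_pending / 3 threshold with an invented ring size, purely for illustration:

#include <stdio.h>

static int gso_fallback_ok(unsigned int gso_segs, unsigned int tx_pending)
{
	return gso_segs < tx_pending / 3;
}

int main(void)
{
	unsigned int ring = 120;                 /* hypothetical tx_pending */

	printf("8 segs:  %s\n", gso_fallback_ok(8, ring)  ? "segment" : "drop");
	printf("64 segs: %s\n", gso_fallback_ok(64, ring) ? "segment" : "drop");
	return 0;
}
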
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 1671fa3..7ba6d53 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.3.0.12"
+#define DRV_VERSION		"2.3.0.20"
 #define DRV_COPYRIGHT		"Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 1ffd105..1fdf5fe 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -298,7 +298,8 @@
 			  int wait)
 {
 	struct devcmd2_controller *dc2c = vdev->devcmd2;
-	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+	struct devcmd2_result *result;
+	u8 color;
 	unsigned int i;
 	int delay, err;
 	u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@
 	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
 		return 0;
 
+	result = dc2c->result + dc2c->next_result;
+	color = dc2c->color;
+
+	dc2c->next_result++;
+	if (dc2c->next_result == dc2c->result_size) {
+		dc2c->next_result = 0;
+		dc2c->color = dc2c->color ? 0 : 1;
+	}
+
 	for (delay = 0; delay < wait; delay++) {
-		if (result->color == dc2c->color) {
-			dc2c->next_result++;
-			if (dc2c->next_result == dc2c->result_size) {
-				dc2c->next_result = 0;
-				dc2c->color = dc2c->color ? 0 : 1;
-			}
+		if (result->color == color) {
 			if (result->error) {
 				err = result->error;
 				if (err != ERR_ECMDUNKNOWN ||
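
The enic devcmd2 fix snapshots the expected result slot and colour before polling, instead of advancing the ring state inside the poll loop. The underlying convention is a colour (phase) bit that the producer flips on every wrap, so a consumer can tell a freshly written slot from a stale one. A small self-contained model of that convention follows; sizes and names are invented and bear no relation to the vnic layout:

#include <stdio.h>

#define RING_SIZE 4

struct slot { int value; unsigned char color; };

static struct slot ring[RING_SIZE];

static void produce(unsigned int *prod, unsigned char *prod_color, int value)
{
	ring[*prod].value = value;
	ring[*prod].color = *prod_color;        /* mark the slot as fresh */
	if (++*prod == RING_SIZE) {
		*prod = 0;
		*prod_color ^= 1;               /* flip colour on wrap */
	}
}

static int consume(unsigned int *cons, unsigned char *cons_color, int *value)
{
	struct slot *s = &ring[*cons];

	if (s->color != *cons_color)
		return 0;                       /* slot not written yet */
	*value = s->value;
	if (++*cons == RING_SIZE) {
		*cons = 0;
		*cons_color ^= 1;
	}
	return 1;
}

int main(void)
{
	unsigned int prod = 0, cons = 0;
	unsigned char pcol = 1, ccol = 1;
	int i, v;

	for (i = 0; i < 3; i++)
		produce(&prod, &pcol, i);
	while (consume(&cons, &ccol, &v))
		printf("got %d\n", v);

	for (i = 3; i < 6; i++)                 /* crosses the wrap point */
		produce(&prod, &pcol, i);
	while (consume(&cons, &ccol, &v))
		printf("got %d\n", v);
	return 0;
}
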
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index d48d579..e94ca1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2429,7 +2429,7 @@
 	flush_workqueue(priv->mfunc.master.comm_wq);
 	destroy_workqueue(priv->mfunc.master.comm_wq);
 err_slaves:
-	while (--i) {
+	while (i--) {
 		for (port = 1; port <= MLX4_MAX_PORTS; port++)
 			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
 	}
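
The mlx4 change is the classic cleanup-loop off-by-one: after a partial setup of entries 0..i-1, while (i--) walks all of them back, whereas the old while (--i) skipped entry 0 and did nothing at all when i == 1. Stand-alone demonstration:

#include <stdio.h>

static void cleanup_correct(int i)            /* while (i--) */
{
	printf("while (i--): ");
	while (i--)
		printf("free[%d] ", i);
	printf("\n");
}

static void cleanup_buggy(int i)              /* while (--i) */
{
	printf("while (--i): ");
	while (--i)
		printf("free[%d] ", i);
	printf("\n");
}

int main(void)
{
	/* three entries (0, 1, 2) were initialised before the failure */
	cleanup_correct(3);   /* frees 2, 1, 0 */
	cleanup_buggy(3);     /* frees 2, 1 -- entry 0 is leaked */
	return 0;
}
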
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 70814b7..fc8bbff 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1880,9 +1880,9 @@
 	}
 	netdev_reset_queue(ndev);
 
+	dwceqos_init_hw(lp);
 	napi_enable(&lp->napi);
 	phy_start(lp->phy_dev);
-	dwceqos_init_hw(lp);
 
 	netif_start_queue(ndev);
 	tasklet_enable(&lp->tx_bdreclaim_tasklet);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 0b14ac3..028e387 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,6 +1039,17 @@
 	return geneve_xmit_skb(skb, dev, info);
 }
 
+static int geneve_change_mtu(struct net_device *dev, int new_mtu)
+{
+	/* GENEVE overhead is not fixed, so we can't enforce a more
+	 * precise max MTU.
+	 */
+	if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1083,7 +1094,7 @@
 	.ndo_stop		= geneve_stop,
 	.ndo_start_xmit		= geneve_xmit,
 	.ndo_get_stats64	= ip_tunnel_get_stats64,
-	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_change_mtu		= geneve_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
@@ -1442,11 +1453,21 @@
 
 	err = geneve_configure(net, dev, &geneve_remote_unspec,
 			       0, 0, 0, htons(dst_port), true, 0);
-	if (err) {
-		free_netdev(dev);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto err;
+
+	/* openvswitch users expect packet sizes to be unrestricted,
+	 * so set the largest MTU we can.
+	 */
+	err = geneve_change_mtu(dev, IP_MAX_MTU);
+	if (err)
+		goto err;
+
 	return dev;
+
+ err:
+	free_netdev(dev);
+	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6543918..a31cd95 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2367,27 +2367,41 @@
 {
 }
 
+static int __vxlan_change_mtu(struct net_device *dev,
+			      struct net_device *lowerdev,
+			      struct vxlan_rdst *dst, int new_mtu, bool strict)
+{
+	int max_mtu = IP_MAX_MTU;
+
+	if (lowerdev)
+		max_mtu = lowerdev->mtu;
+
+	if (dst->remote_ip.sa.sa_family == AF_INET6)
+		max_mtu -= VXLAN6_HEADROOM;
+	else
+		max_mtu -= VXLAN_HEADROOM;
+
+	if (new_mtu < 68)
+		return -EINVAL;
+
+	if (new_mtu > max_mtu) {
+		if (strict)
+			return -EINVAL;
+
+		new_mtu = max_mtu;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
-	struct net_device *lowerdev;
-	int max_mtu;
-
-	lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
-	if (lowerdev == NULL)
-		return eth_change_mtu(dev, new_mtu);
-
-	if (dst->remote_ip.sa.sa_family == AF_INET6)
-		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
-	else
-		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
-
-	if (new_mtu < 68 || new_mtu > max_mtu)
-		return -EINVAL;
-
-	dev->mtu = new_mtu;
-	return 0;
+	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+							 dst->remote_ifindex);
+	return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
 }
 
 static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
@@ -2765,6 +2779,7 @@
 	int err;
 	bool use_ipv6 = false;
 	__be16 default_port = vxlan->cfg.dst_port;
+	struct net_device *lowerdev = NULL;
 
 	vxlan->net = src_net;
 
@@ -2785,9 +2800,7 @@
 	}
 
 	if (conf->remote_ifindex) {
-		struct net_device *lowerdev
-			 = __dev_get_by_index(src_net, conf->remote_ifindex);
-
+		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
 		dst->remote_ifindex = conf->remote_ifindex;
 
 		if (!lowerdev) {
@@ -2811,6 +2824,12 @@
 		needed_headroom = lowerdev->hard_header_len;
 	}
 
+	if (conf->mtu) {
+		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
+		if (err)
+			return err;
+	}
+
 	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
 		needed_headroom += VXLAN6_HEADROOM;
 	else
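
__vxlan_change_mtu() centralises the MTU policy: the ceiling is the lower device's MTU minus the encapsulation headroom, explicit requests above it are rejected, and the non-strict create/configure path clamps instead. A plain-C sketch of that policy with invented headroom constants (not the real VXLAN_HEADROOM values):

#include <stdio.h>

#define MIN_MTU      68
#define V4_HEADROOM  50      /* stand-in for VXLAN_HEADROOM  */
#define V6_HEADROOM  70      /* stand-in for VXLAN6_HEADROOM */

static int change_mtu(int *dev_mtu, int lower_mtu, int ipv6,
		      int new_mtu, int strict)
{
	int max_mtu = lower_mtu - (ipv6 ? V6_HEADROOM : V4_HEADROOM);

	if (new_mtu < MIN_MTU)
		return -1;
	if (new_mtu > max_mtu) {
		if (strict)
			return -1;       /* user asked for too much */
		new_mtu = max_mtu;       /* creation path: just clamp */
	}
	*dev_mtu = new_mtu;
	return 0;
}

int main(void)
{
	int mtu = 1500;

	printf("strict 9000 over a 1500 lowerdev: %d\n",
	       change_mtu(&mtu, 1500, 0, 9000, 1));
	change_mtu(&mtu, 1500, 0, 9000, 0);
	printf("clamped mtu: %d\n", mtu);
	return 0;
}
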
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d62373..b586d84 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -17,5 +17,6 @@
 	  and block device nodes, as well as a translation for a small
 	  number of selected SCSI commands to NVMe commands to the NVMe
 	  driver.  If you don't know what this means you probably want
-	  to say N here, and if you know what it means you probably
-	  want to say N as well.
+	  to say N here, unless you run a distro that abuses the SCSI
+	  emulation to provide stable device names for mount by id, like
+	  some OpenSuSE and SLES versions.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001..3cd921e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1121,7 +1121,6 @@
 	ns->queue = blk_mq_init_queue(ctrl->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_free_ns;
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
 	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725..6bb15e4 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,9 +146,10 @@
 	};
 };
 
+#define NVME_NVM_LP_MLC_PAIRS 886
 struct nvme_nvm_lp_mlc {
 	__u16			num_pairs;
-	__u8			pairs[886];
+	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
 };
 
 struct nvme_nvm_lp_tbl {
@@ -282,9 +283,14 @@
 			memcpy(dst->lptbl.id, src->lptbl.id, 8);
 			dst->lptbl.mlc.num_pairs =
 					le16_to_cpu(src->lptbl.mlc.num_pairs);
-			/* 4 bits per pair */
+
+			if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
+				pr_err("nvm: number of MLC pairs not supported\n");
+				return -EINVAL;
+			}
+
 			memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
-						dst->lptbl.mlc.num_pairs >> 1);
+						dst->lptbl.mlc.num_pairs);
 		}
 	}
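
The lightnvm identity fix does two things: it validates the device-reported MLC pair count against the destination array before copying, and it copies one byte per pair rather than the old half-byte count. A generic validate-then-copy sketch, with the array size the only number carried over from the patch:

#include <stdio.h>
#include <string.h>

#define MAX_PAIRS 886            /* size of the destination array */

static int copy_pairs(unsigned char *dst, const unsigned char *src,
		      unsigned int num_pairs)
{
	if (num_pairs > MAX_PAIRS) {
		fprintf(stderr, "pair count %u not supported\n", num_pairs);
		return -1;
	}
	memcpy(dst, src, num_pairs); /* one byte per pair */
	return 0;
}

int main(void)
{
	unsigned char src[1024] = { 0 }, dst[MAX_PAIRS];

	printf("valid: %d, oversized: %d\n",
	       copy_pairs(dst, src, 100), copy_pairs(dst, src, 1000));
	return 0;
}
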
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb7..9664d07 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -139,9 +139,9 @@
 	u32 val = 0;
 
 	if (ctrl->ops->io_incapable(ctrl))
-		return false;
+		return true;
 	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
-		return false;
+		return true;
 	return val & NVME_CSTS_CFS;
 }
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef832..a128672 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -678,6 +678,11 @@
 	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
+	if (unlikely(nvmeq->cq_vector < 0)) {
+		ret = BLK_MQ_RQ_QUEUE_BUSY;
+		spin_unlock_irq(&nvmeq->q_lock);
+		goto out;
+	}
 	__nvme_submit_cmd(nvmeq, &cmnd);
 	nvme_process_cq(nvmeq);
 	spin_unlock_irq(&nvmeq->q_lock);
@@ -999,7 +1004,7 @@
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_warn(nvmeq->q_dmadev,
+	dev_dbg_ratelimited(nvmeq->q_dmadev,
 		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
 
 	status = NVME_SC_ABORT_REQ;
@@ -2111,16 +2116,12 @@
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
-
 	pci_set_drvdata(pdev, NULL);
-	flush_work(&dev->reset_work);
 	flush_work(&dev->scan_work);
 	nvme_remove_namespaces(&dev->ctrl);
 	nvme_uninit_ctrl(&dev->ctrl);
 	nvme_dev_disable(dev, true);
+	flush_work(&dev->reset_work);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
 	nvme_release_cmb(dev);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 6fd4e5a..9d11d98 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -70,6 +70,9 @@
 	if (pos >= nvmem->size)
 		return 0;
 
+	if (count < nvmem->word_size)
+		return -EINVAL;
+
 	if (pos + count > nvmem->size)
 		count = nvmem->size - pos;
 
@@ -95,6 +98,9 @@
 	if (pos >= nvmem->size)
 		return 0;
 
+	if (count < nvmem->word_size)
+		return -EINVAL;
+
 	if (pos + count > nvmem->size)
 		count = nvmem->size - pos;
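
The nvmem hunks reject binary-attribute reads and writes shorter than the device word size, alongside the existing end-of-device and truncation handling. A sketch of the combined size policy for the read side, with illustrative values only:

#include <stdio.h>

static long bin_read(long pos, long count, long size, long word_size)
{
	if (pos >= size)
		return 0;            /* EOF */
	if (count < word_size)
		return -1;           /* cannot return a partial word */
	if (pos + count > size)
		count = size - pos;  /* truncate at the end of the device */
	return count;
}

int main(void)
{
	long size = 256, word = 4;

	printf("normal: %ld, tail: %ld, eof: %ld, tiny: %ld\n",
	       bin_read(0, 64, size, word), bin_read(250, 64, size, word),
	       bin_read(300, 64, size, word), bin_read(0, 2, size, word));
	return 0;
}
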
 
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index afb67e7..3829e5f 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,6 +21,7 @@
 	.reg_bits = 32,
 	.val_bits = 8,
 	.reg_stride = 1,
+	.val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static struct nvmem_config econfig = {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7ee21ae..e7bfc17 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -635,6 +635,13 @@
 		msi_base = be32_to_cpup(msi_map + 2);
 		rid_len = be32_to_cpup(msi_map + 3);
 
+		if (rid_base & ~map_mask) {
+			dev_err(parent_dev,
+				"Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
+				map_mask, rid_base);
+			return rid_out;
+		}
+
 		msi_controller_node = of_find_node_by_phandle(phandle);
 
 		matched = (masked_rid >= rid_base &&
@@ -654,7 +661,7 @@
 	if (!matched)
 		return rid_out;
 
-	rid_out = masked_rid + msi_base;
+	rid_out = masked_rid - rid_base + msi_base;
 	dev_dbg(dev,
 		"msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
 		dev_name(parent_dev), map_mask, rid_base, msi_base,
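
The of/irq fix changes the msi-map translation from masked_rid + msi_base to masked_rid - rid_base + msi_base, i.e. the requester ID is rebased into the MSI controller's window rather than merely offset, and a rid-base that the mask would throw away is now rejected. Stand-alone model of the corrected arithmetic (map values invented):

#include <stdio.h>

struct msi_map { unsigned int rid_base, msi_base, len; };

static unsigned int map_rid(unsigned int rid, unsigned int mask,
			    const struct msi_map *m)
{
	unsigned int masked = rid & mask;

	if (masked < m->rid_base || masked >= m->rid_base + m->len)
		return rid;                        /* no translation */
	return masked - m->rid_base + m->msi_base; /* rebase into the window */
}

int main(void)
{
	struct msi_map m = { .rid_base = 0x100, .msi_base = 0x2000, .len = 0x100 };

	printf("0x0142 -> 0x%x\n", map_rid(0x0142, 0xffff, &m)); /* 0x2042 */
	printf("0x0342 -> 0x%x\n", map_rid(0x0342, 0xffff, &m)); /* unmapped */
	return 0;
}
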
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5648317..39c4be4 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -154,6 +154,7 @@
 	{ .compatible = "marvell,88E1111", },
 	{ .compatible = "marvell,88e1116", },
 	{ .compatible = "marvell,88e1118", },
+	{ .compatible = "marvell,88e1145", },
 	{ .compatible = "marvell,88e1149r", },
 	{ .compatible = "marvell,88e1310", },
 	{ .compatible = "marvell,88E1510", },
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 5816bce..a576aee 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -64,7 +64,6 @@
 #define OARR_SIZE_CFG                BIT(OARR_SIZE_CFG_SHIFT)
 
 #define MAX_NUM_OB_WINDOWS           2
-#define MAX_NUM_PAXC_PF              4
 
 #define IPROC_PCIE_REG_INVALID 0xffff
 
@@ -170,20 +169,6 @@
 	writel(val, pcie->base + offset + (window * 8));
 }
 
-static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
-					      unsigned int slot,
-					      unsigned int fn)
-{
-	if (slot > 0)
-		return false;
-
-	/* PAXC can only support limited number of functions */
-	if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
-		return false;
-
-	return true;
-}
-
 /**
  * Note access to the configuration registers are protected at the higher layer
  * by 'pci_lock' in drivers/pci/access.c
@@ -199,11 +184,11 @@
 	u32 val;
 	u16 offset;
 
-	if (!iproc_pcie_device_is_valid(pcie, slot, fn))
-		return NULL;
-
 	/* root complex access */
 	if (busno == 0) {
+		if (slot > 0 || fn > 0)
+			return NULL;
+
 		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
 				     where & CFG_IND_ADDR_MASK);
 		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
@@ -213,6 +198,14 @@
 			return (pcie->base + offset);
 	}
 
+	/*
+	 * PAXC is connected to an internally emulated EP within the SoC.  It
+	 * allows only one device.
+	 */
+	if (pcie->type == IPROC_PCIE_PAXC)
+		if (slot > 0)
+			return NULL;
+
 	/* EP device access */
 	val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
 		(slot << CFG_ADDR_DEV_NUM_SHIFT) |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a2..48d21e0 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@
 	rpc->rpd = dev;
 	INIT_WORK(&rpc->dpc_handler, aer_isr);
 	mutex_init(&rpc->rpc_mutex);
-	init_waitqueue_head(&rpc->wait_release);
 
 	/* Use PCIe bus function to store rpc into PCIe device */
 	set_service_data(dev, rpc);
@@ -285,8 +284,7 @@
 		if (rpc->isr)
 			free_irq(dev->irq, dev);
 
-		wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+		flush_work(&rpc->dpc_handler);
 		aer_disable_rootport(rpc);
 		kfree(rpc);
 		set_service_data(dev, NULL);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 84420b7..945c939 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@
 					 * recovery on the same
 					 * root port hierarchy
 					 */
-	wait_queue_head_t wait_release;
 };
 
 struct aer_broadcast_data {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 7123925..521e39c 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -811,8 +811,6 @@
 	while (get_e_source(rpc, &e_src))
 		aer_isr_one_error(p_device, &e_src);
 	mutex_unlock(&rpc->rpc_mutex);
-
-	wake_up(&rpc->wait_release);
 }
 
 /**
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index e7e117d..0124d17 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -224,6 +224,7 @@
 
 config PHY_HI6220_USB
 	tristate "hi6220 USB PHY support"
+	depends on (ARCH_HISI && ARM64) || COMPILE_TEST
 	select GENERIC_PHY
 	select MFD_SYSCON
 	help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8c7f27d..e7e574d 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -275,20 +275,21 @@
 
 int phy_power_on(struct phy *phy)
 {
-	int ret;
+	int ret = 0;
 
 	if (!phy)
-		return 0;
+		goto out;
 
 	if (phy->pwr) {
 		ret = regulator_enable(phy->pwr);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
 	ret = phy_pm_runtime_get_sync(phy);
 	if (ret < 0 && ret != -ENOTSUPP)
-		return ret;
+		goto err_pm_sync;
+
 	ret = 0; /* Override possible ret == -ENOTSUPP */
 
 	mutex_lock(&phy->mutex);
@@ -296,19 +297,20 @@
 		ret = phy->ops->power_on(phy);
 		if (ret < 0) {
 			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
-			goto out;
+			goto err_pwr_on;
 		}
 	}
 	++phy->power_count;
 	mutex_unlock(&phy->mutex);
 	return 0;
 
-out:
+err_pwr_on:
 	mutex_unlock(&phy->mutex);
 	phy_pm_runtime_put_sync(phy);
+err_pm_sync:
 	if (phy->pwr)
 		regulator_disable(phy->pwr);
-
+out:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(phy_power_on);
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 4a3fc6e..840f3ea 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -715,6 +715,7 @@
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
 	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
 
 	/* Our job is to use irqs and status from the power module
 	 * to keep the transceiver disabled when nothing's connected.
@@ -750,6 +751,7 @@
 	struct twl4030_usb *twl = platform_get_drvdata(pdev);
 	int val;
 
+	usb_remove_phy(&twl->phy);
 	pm_runtime_get_sync(twl->dev);
 	cancel_delayed_work(&twl->id_workaround_work);
 	device_remove_file(twl->dev, &dev_attr_vbus);
@@ -757,6 +759,13 @@
 	/* set transceiver mode to power on defaults */
 	twl4030_usb_set_mode(twl, -1);
 
+	/* idle ulpi before powering off */
+	if (cable_present(twl->linkstat))
+		pm_runtime_put_noidle(twl->dev);
+	pm_runtime_mark_last_busy(twl->dev);
+	pm_runtime_put_sync_suspend(twl->dev);
+	pm_runtime_disable(twl->dev);
+
 	/* autogate 60MHz ULPI clock,
 	 * clear dpll clock request for i2c access,
 	 * disable 32KHz
@@ -771,11 +780,6 @@
 	/* disable complete OTG block */
 	twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
 
-	if (cable_present(twl->linkstat))
-		pm_runtime_put_noidle(twl->dev);
-	pm_runtime_mark_last_busy(twl->dev);
-	pm_runtime_put(twl->dev);
-
 	return 0;
 }
 
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16d48a4..e96e86d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -347,6 +347,7 @@
 		ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
 		break;
 	case PIN_CONFIG_INPUT_ENABLE:
+		mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
 		ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
 		break;
 	case PIN_CONFIG_OUTPUT:
@@ -354,6 +355,7 @@
 		ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
 		break;
 	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+		mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
 		ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
 		break;
 	case PIN_CONFIG_DRIVE_STRENGTH:
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4d4738..3ef798f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -666,16 +666,19 @@
 		struct mvebu_mpp_ctrl_setting *set = &mode->settings[0];
 		struct mvebu_pinctrl_group *grp;
 		unsigned num_settings;
+		unsigned supp_settings;
 
-		for (num_settings = 0; ; set++) {
+		for (num_settings = 0, supp_settings = 0; ; set++) {
 			if (!set->name)
 				break;
 
+			num_settings++;
+
 			/* skip unsupported settings for this variant */
 			if (pctl->variant && !(pctl->variant & set->variant))
 				continue;
 
-			num_settings++;
+			supp_settings++;
 
 			/* find gpio/gpo/gpi settings */
 			if (strcmp(set->name, "gpio") == 0)
@@ -688,7 +691,7 @@
 		}
 
 		/* skip modes with no settings for this variant */
-		if (!num_settings)
+		if (!supp_settings)
 			continue;
 
 		grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 085e601..1f7469c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -191,6 +191,7 @@
 		dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
 }
 
+#ifdef CONFIG_DEBUG_FS
 static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset,
 				  enum abx500_gpio_pull_updown *pull_updown)
 {
@@ -226,6 +227,7 @@
 
 	return ret;
 }
+#endif
 
 static int abx500_set_pull_updown(struct abx500_pinctrl *pct,
 				  int offset, enum abx500_gpio_pull_updown val)
@@ -468,6 +470,7 @@
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_FS
 static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
 			  unsigned gpio)
 {
@@ -553,8 +556,6 @@
 	return ret;
 }
 
-#ifdef CONFIG_DEBUG_FS
-
 #include <linux/seq_file.h>
 
 static void abx500_gpio_dbg_show_one(struct seq_file *s,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d90e205..216f227 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,6 +426,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(pxa2xx_pinctrl_init);
 
 int pxa2xx_pinctrl_exit(struct platform_device *pdev)
 {
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f67b1e9..5cc97f8 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -514,25 +514,35 @@
 	.pin_config_group_set	= samsung_pinconf_group_set,
 };
 
-/* gpiolib gpio_set callback function */
-static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+/*
+ * The samsung_gpio_set_value() should be called with "bank->slock" held
+ * to avoid a race condition.
+ */
+static void samsung_gpio_set_value(struct gpio_chip *gc,
+					  unsigned offset, int value)
 {
 	struct samsung_pin_bank *bank = gpiochip_get_data(gc);
 	const struct samsung_pin_bank_type *type = bank->type;
-	unsigned long flags;
 	void __iomem *reg;
 	u32 data;
 
 	reg = bank->drvdata->virt_base + bank->pctl_offset;
 
-	spin_lock_irqsave(&bank->slock, flags);
-
 	data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
 	data &= ~(1 << offset);
 	if (value)
 		data |= 1 << offset;
 	writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
+}
 
+/* gpiolib gpio_set callback function */
+static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+	struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&bank->slock, flags);
+	samsung_gpio_set_value(gc, offset, value);
 	spin_unlock_irqrestore(&bank->slock, flags);
 }
 
@@ -553,6 +563,8 @@
 }
 
 /*
+ * The samsung_gpio_set_direction() should be called with "bank->slock" held
+ * to avoid a race condition.
  * The calls to gpio_direction_output() and gpio_direction_input()
  * leads to this function call.
  */
@@ -564,7 +576,6 @@
 	struct samsung_pinctrl_drv_data *drvdata;
 	void __iomem *reg;
 	u32 data, mask, shift;
-	unsigned long flags;
 
 	bank = gpiochip_get_data(gc);
 	type = bank->type;
@@ -581,31 +592,42 @@
 		reg += 4;
 	}
 
-	spin_lock_irqsave(&bank->slock, flags);
-
 	data = readl(reg);
 	data &= ~(mask << shift);
 	if (!input)
 		data |= FUNC_OUTPUT << shift;
 	writel(data, reg);
 
-	spin_unlock_irqrestore(&bank->slock, flags);
-
 	return 0;
 }
 
 /* gpiolib gpio_direction_input callback function. */
 static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
 {
-	return samsung_gpio_set_direction(gc, offset, true);
+	struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&bank->slock, flags);
+	ret = samsung_gpio_set_direction(gc, offset, true);
+	spin_unlock_irqrestore(&bank->slock, flags);
+	return ret;
 }
 
 /* gpiolib gpio_direction_output callback function. */
 static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
 							int value)
 {
-	samsung_gpio_set(gc, offset, value);
-	return samsung_gpio_set_direction(gc, offset, false);
+	struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&bank->slock, flags);
+	samsung_gpio_set_value(gc, offset, value);
+	ret = samsung_gpio_set_direction(gc, offset, false);
+	spin_unlock_irqrestore(&bank->slock, flags);
+
+	return ret;
 }
 
 /*
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 77d4cf0..11760bb 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -492,6 +492,7 @@
 	.pins = sun8i_h3_pins,
 	.npins = ARRAY_SIZE(sun8i_h3_pins),
 	.irq_banks = 2,
+	.irq_read_needs_mux = true
 };
 
 static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 20f0ad9..e20f23e 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -41,8 +41,7 @@
 	{ KE_KEY, 4, { KEY_HOME } },
 	{ KE_KEY, 5, { KEY_END } },
 	{ KE_KEY, 6, { KEY_PAGEUP } },
-	{ KE_KEY, 4, { KEY_PAGEDOWN } },
-	{ KE_KEY, 4, { KEY_HOME } },
+	{ KE_KEY, 7, { KEY_PAGEDOWN } },
 	{ KE_KEY, 8, { KEY_RFKILL } },
 	{ KE_KEY, 9, { KEY_POWER } },
 	{ KE_KEY, 11, { KEY_SLEEP } },
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 02bc5a6..aa45424 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -49,7 +49,7 @@
 
 static int scu_reg_access(u32 cmd, struct scu_ipc_data  *data)
 {
-	int count = data->count;
+	unsigned int count = data->count;
 
 	if (count == 0 || count == 3 || count > 4)
 		return -EINVAL;
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f700723..d28e3ab 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -342,6 +342,7 @@
 /* Device IDs of parts that have 32KB MCH space */
 static const unsigned int mch_quirk_devices[] = {
 	0x0154,	/* Ivy Bridge */
+	0x0a04, /* Haswell-ULT */
 	0x0c00,	/* Haswell */
 	0x1604, /* Broadwell */
 };
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 41605da..c78db05 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3035,6 +3035,7 @@
 		max = block->base->discipline->max_blocks << block->s2b_shift;
 	}
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
+	block->request_queue->limits.max_dev_sectors = max;
 	blk_queue_logical_block_size(block->request_queue,
 				     block->bp_block);
 	blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1db..286782c 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@
 		spin_unlock_irqrestore(&lcu->lock, flags);
 		cancel_work_sync(&lcu->suc_data.worker);
 		spin_lock_irqsave(&lcu->lock, flags);
-		if (device == lcu->suc_data.device)
+		if (device == lcu->suc_data.device) {
+			dasd_put_device(device);
 			lcu->suc_data.device = NULL;
+		}
 	}
 	was_pending = 0;
 	if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@
 		was_pending = 1;
 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
 		spin_lock_irqsave(&lcu->lock, flags);
-		if (device == lcu->ruac_data.device)
+		if (device == lcu->ruac_data.device) {
+			dasd_put_device(device);
 			lcu->ruac_data.device = NULL;
+		}
 	}
 	private->lcu = NULL;
 	spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@
 	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
 			    " alias data in lcu (rc = %d), retry later", rc);
-		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
+		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
+			dasd_put_device(device);
 	} else {
+		dasd_put_device(device);
 		lcu->ruac_data.device = NULL;
 		lcu->flags &= ~UPDATE_PENDING;
 	}
@@ -593,8 +599,10 @@
 	 */
 	if (!usedev)
 		return -EINVAL;
+	dasd_get_device(usedev);
 	lcu->ruac_data.device = usedev;
-	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
+	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
+		dasd_put_device(usedev);
 	return 0;
 }
 
@@ -723,7 +731,7 @@
 	ASCEBC((char *) &cqr->magic, 4);
 	ccw = cqr->cpaddr;
 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
-	ccw->flags = 0 ;
+	ccw->flags = CCW_FLAG_SLI;
 	ccw->count = 16;
 	ccw->cda = (__u32)(addr_t) cqr->data;
 	((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@
 	/* 3. read new alias configuration */
 	_schedule_lcu_update(lcu, device);
 	lcu->suc_data.device = NULL;
+	dasd_put_device(device);
 	spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
@@ -989,6 +998,8 @@
 	}
 	lcu->suc_data.reason = reason;
 	lcu->suc_data.device = device;
+	dasd_get_device(device);
 	spin_unlock(&lcu->lock);
-	schedule_work(&lcu->suc_data.worker);
+	if (!schedule_work(&lcu->suc_data.worker))
+		dasd_put_device(device);
 };
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3613581..93880ed 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -562,7 +562,7 @@
 			/*
 			 * Command Lock contention
 			 */
-			err = SCSI_DH_RETRY;
+			err = SCSI_DH_IMM_RETRY;
 		break;
 	default:
 		break;
@@ -612,6 +612,8 @@
 		err = mode_select_handle_sense(sdev, h->sense);
 		if (err == SCSI_DH_RETRY && retry_cnt--)
 			goto retry;
+		if (err == SCSI_DH_IMM_RETRY)
+			goto retry;
 	}
 	if (err == SCSI_DH_OK) {
 		h->state = RDAC_STATE_ACTIVE;
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index b676618..d1dd161 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,6 +1,6 @@
 config SCSI_HISI_SAS
 	tristate "HiSilicon SAS"
-	depends on HAS_DMA
+	depends on HAS_DMA && HAS_IOMEM
 	depends on ARM64 || COMPILE_TEST
 	select SCSI_SAS_LIBSAS
 	select BLK_DEV_INTEGRITY
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 057fdeb..eea24d7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1289,13 +1289,10 @@
 		goto out;
 	}
 
-	if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) {
-		if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) ||
-		    !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK))
-			ts->stat = SAS_DATA_OVERRUN;
-		else
-			slot_err_v1_hw(hisi_hba, task, slot);
+	if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK &&
+		!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
 
+		slot_err_v1_hw(hisi_hba, task, slot);
 		goto out;
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52a8765..692a757 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2204,7 +2204,7 @@
 	/* Clear outstanding commands array. */
 	for (que = 0; que < ha->max_req_queues; que++) {
 		req = ha->req_q_map[que];
-		if (!req)
+		if (!req || !test_bit(que, ha->req_qid_map))
 			continue;
 		req->out_ptr = (void *)(req->ring + req->length);
 		*req->out_ptr = 0;
@@ -2221,7 +2221,7 @@
 
 	for (que = 0; que < ha->max_rsp_queues; que++) {
 		rsp = ha->rsp_q_map[que];
-		if (!rsp)
+		if (!rsp || !test_bit(que, ha->rsp_qid_map))
 			continue;
 		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
 		*rsp->in_ptr = 0;
@@ -4981,7 +4981,7 @@
 
 	for (i = 1; i < ha->max_rsp_queues; i++) {
 		rsp = ha->rsp_q_map[i];
-		if (rsp) {
+		if (rsp && test_bit(i, ha->rsp_qid_map)) {
 			rsp->options &= ~BIT_0;
 			ret = qla25xx_init_rsp_que(base_vha, rsp);
 			if (ret != QLA_SUCCESS)
@@ -4996,8 +4996,8 @@
 	}
 	for (i = 1; i < ha->max_req_queues; i++) {
 		req = ha->req_q_map[i];
-		if (req) {
-		/* Clear outstanding commands array. */
+		if (req && test_bit(i, ha->req_qid_map)) {
+			/* Clear outstanding commands array. */
 			req->options &= ~BIT_0;
 			ret = qla25xx_init_req_que(base_vha, req);
 			if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d4d65eb..4af9547 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3063,9 +3063,9 @@
 		    "MSI-X: Failed to enable support "
 		    "-- %d/%d\n Retry with %d vectors.\n",
 		    ha->msix_count, ret, ret);
+		ha->msix_count = ret;
+		ha->max_rsp_queues = ha->msix_count - 1;
 	}
-	ha->msix_count = ret;
-	ha->max_rsp_queues = ha->msix_count - 1;
 	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
 				ha->msix_count, GFP_KERNEL);
 	if (!ha->msix_entries) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c5dd594..cf7ba52 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -600,7 +600,7 @@
 	/* Delete request queues */
 	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
 		req = ha->req_q_map[cnt];
-		if (req) {
+		if (req && test_bit(cnt, ha->req_qid_map)) {
 			ret = qla25xx_delete_req_que(vha, req);
 			if (ret != QLA_SUCCESS) {
 				ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@
 	/* Delete response queues */
 	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
 		rsp = ha->rsp_q_map[cnt];
-		if (rsp) {
+		if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
 			ret = qla25xx_delete_rsp_que(vha, rsp);
 			if (ret != QLA_SUCCESS) {
 				ql_log(ql_log_warn, vha, 0x00eb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f1788db..f6c7ce3 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -409,6 +409,9 @@
 	int cnt;
 
 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+		if (!test_bit(cnt, ha->req_qid_map))
+			continue;
+
 		req = ha->req_q_map[cnt];
 		qla2x00_free_req_que(ha, req);
 	}
@@ -416,6 +419,9 @@
 	ha->req_q_map = NULL;
 
 	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+		if (!test_bit(cnt, ha->rsp_qid_map))
+			continue;
+
 		rsp = ha->rsp_q_map[cnt];
 		qla2x00_free_rsp_que(ha, rsp);
 	}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8075a4c..ee967be 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -105,7 +105,7 @@
 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
 	int fn, void *iocb, int flags);
 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
-	*cmd, struct atio_from_isp *atio, int ha_locked);
+	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
 	struct qla_tgt_srr_imm *imm, int ha_lock);
 static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -1756,7 +1756,7 @@
 		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
 		    0, 0, 0, 0, 0, 0);
 	else {
-		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
 			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
 			    mcmd->fc_tm_rsp, false);
 		else
@@ -2665,7 +2665,7 @@
 			/* no need to terminate. FW already freed exchange. */
 			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
 		else
-			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		return 0;
 	}
@@ -3173,7 +3173,8 @@
 }
 
 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
-	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
+	int ul_abort)
 {
 	unsigned long flags = 0;
 	int rc;
@@ -3193,8 +3194,7 @@
 		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
 
 done:
-	if (cmd && (!cmd->aborted ||
-	    !cmd->cmd_sent_to_fw)) {
+	if (cmd && !ul_abort && !cmd->aborted) {
 		if (cmd->sg_mapped)
 			qlt_unmap_sg(vha, cmd);
 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3253,21 +3253,38 @@
 
 }
 
-void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
 {
 	struct qla_tgt *tgt = cmd->tgt;
 	struct scsi_qla_host *vha = tgt->vha;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
+	unsigned long flags;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
 	    "qla_target(%d): terminating exchange for aborted cmd=%p "
 	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
 	    se_cmd->tag);
 
+	spin_lock_irqsave(&cmd->cmd_lock, flags);
+	if (cmd->aborted) {
+		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+		/*
+		 * It's normal to see 2 calls in this path:
+		 *  1) XFER Rdy completion + CMD_T_ABORT
+		 *  2) TCM TMR - drain_state_list
+		 */
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+			"multiple abort. %p transport_state %x, t_state %x,"
+			" se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
+			cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
+		return EIO;
+	}
 	cmd->aborted = 1;
 	cmd->cmd_flags |= BIT_6;
+	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 
-	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
+	return 0;
 }
 EXPORT_SYMBOL(qlt_abort_cmd);
 
@@ -3282,6 +3299,9 @@
 
 	BUG_ON(cmd->cmd_in_wq);
 
+	if (cmd->sg_mapped)
+		qlt_unmap_sg(cmd->vha, cmd);
+
 	if (!cmd->q_full)
 		qlt_decr_num_pend_cmds(cmd->vha);
 
@@ -3399,7 +3419,7 @@
 		term = 1;
 
 	if (term)
-		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
 
 	return term;
 }
@@ -3580,12 +3600,13 @@
 		case CTIO_PORT_LOGGED_OUT:
 		case CTIO_PORT_UNAVAILABLE:
 		{
-			int logged_out = (status & 0xFFFF);
+			int logged_out =
+				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
+
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
 			    "qla_target(%d): CTIO with %s status %x "
 			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
-			    (logged_out == CTIO_PORT_LOGGED_OUT) ?
-			    "PORT LOGGED OUT" : "PORT UNAVAILABLE",
+			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
 			    status, cmd->state, se_cmd);
 
 			if (logged_out && cmd->sess) {
@@ -3754,6 +3775,7 @@
 		goto out_term;
 	}
 
+	spin_lock_init(&cmd->cmd_lock);
 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
 	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
 	cmd->unpacked_lun = scsilun_to_int(
@@ -3796,7 +3818,7 @@
 	 */
 	cmd->cmd_flags |= BIT_2;
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
 
 	qlt_decr_num_pend_cmds(vha);
 	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
@@ -3918,7 +3940,7 @@
 
 out_term:
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
+	qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	kfree(op);
 
@@ -3982,7 +4004,8 @@
 
 	cmd->cmd_in_wq = 1;
 	cmd->cmd_flags |= BIT_0;
-	cmd->se_cmd.cpuid = -1;
+	cmd->se_cmd.cpuid = ha->msix_count ?
+		ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
 
 	spin_lock(&vha->cmd_list_lock);
 	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
@@ -3990,7 +4013,6 @@
 
 	INIT_WORK(&cmd->work, qlt_do_work);
 	if (ha->msix_count) {
-		cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
 		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
 			queue_work_on(smp_processor_id(), qla_tgt_wq,
 			    &cmd->work);
@@ -4771,7 +4793,7 @@
 		dump_stack();
 	} else {
 		cmd->cmd_flags |= BIT_9;
-		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
@@ -4950,7 +4972,7 @@
 				    sctio, sctio->srr_id);
 				list_del(&sctio->srr_list_entry);
 				qlt_send_term_exchange(vha, sctio->cmd,
-				    &sctio->cmd->atio, 1);
+				    &sctio->cmd->atio, 1, 0);
 				kfree(sctio);
 			}
 		}
@@ -5123,7 +5145,7 @@
 	    atio->u.isp24.fcp_hdr.s_id);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	if (!sess) {
-		qlt_send_term_exchange(vha, NULL, atio, 1);
+		qlt_send_term_exchange(vha, NULL, atio, 1, 0);
 		return 0;
 	}
 	/* Sending marker isn't necessary, since we called from ISR */
@@ -5406,7 +5428,7 @@
 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
 				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
 #else
-				qlt_send_term_exchange(vha, NULL, atio, 1);
+				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
 #endif
 
 				if (!ha_locked)
@@ -5523,7 +5545,7 @@
 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
 				qlt_send_busy(vha, atio, 0);
 #else
-				qlt_send_term_exchange(vha, NULL, atio, 1);
+				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
 #endif
 			} else {
 				if (tgt->tgt_stop) {
@@ -5532,7 +5554,7 @@
 					    "command to target, sending TERM "
 					    "EXCHANGE for rsp\n");
 					qlt_send_term_exchange(vha, NULL,
-					    atio, 1);
+					    atio, 1, 0);
 				} else {
 					ql_dbg(ql_dbg_tgt, vha, 0xe060,
 					    "qla_target(%d): Unable to send "
@@ -5960,7 +5982,7 @@
 	return;
 
 out_term:
-	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
+	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
 	if (sess)
 		ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 71b2865..22a6a76 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -943,6 +943,36 @@
 	qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
 };
 
+typedef enum {
+	/*
+	 * BIT_0 - Atio Arrival / schedule to work
+	 * BIT_1 - qlt_do_work
+	 * BIT_2 - qlt_do work failed
+	 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+	 * BIT_4 - read response/tcm_qla2xxx_queue_data_in
+	 * BIT_5 - status response / tcm_qla2xxx_queue_status
+	 * BIT_6 - tcm request to abort/Term exchange.
+	 *	pre_xmit_response->qlt_send_term_exchange
+	 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+	 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+	 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
+	 * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
+	 *
+	 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
+	 * BIT_13 - Bad completion -
+	 *	qlt_ctio_do_completion --> qlt_term_ctio_exchange
+	 * BIT_14 - Back end data received/sent.
+	 * BIT_15 - SRR prepare ctio
+	 * BIT_16 - complete free
+	 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+	 * BIT_18 - completion w/abort status
+	 * BIT_19 - completion w/unknown status
+	 * BIT_20 - tcm_qla2xxx_free_cmd
+	 */
+	CMD_FLAG_DATA_WORK = BIT_11,
+	CMD_FLAG_DATA_WORK_FREE = BIT_21,
+} cmd_flags_t;
+
 struct qla_tgt_cmd {
 	struct se_cmd se_cmd;
 	struct qla_tgt_sess *sess;
@@ -952,6 +982,7 @@
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 
+	spinlock_t cmd_lock;
 	/* to save extra sess dereferences */
 	unsigned int conf_compl_supported:1;
 	unsigned int sg_mapped:1;
@@ -986,30 +1017,8 @@
 
 	uint64_t jiffies_at_alloc;
 	uint64_t jiffies_at_free;
-	/* BIT_0 - Atio Arrival / schedule to work
-	 * BIT_1 - qlt_do_work
-	 * BIT_2 - qlt_do work failed
-	 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
-	 * BIT_4 - read respond/tcm_qla2xx_queue_data_in
-	 * BIT_5 - status respond / tcm_qla2xx_queue_status
-	 * BIT_6 - tcm request to abort/Term exchange.
-	 *	pre_xmit_response->qlt_send_term_exchange
-	 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
-	 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
-	 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
-	 * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
-	 * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
-	 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
-	 * BIT_13 - Bad completion -
-	 *	qlt_ctio_do_completion --> qlt_term_ctio_exchange
-	 * BIT_14 - Back end data received/sent.
-	 * BIT_15 - SRR prepare ctio
-	 * BIT_16 - complete free
-	 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
-	 * BIT_18 - completion w/abort status
-	 * BIT_19 - completion w/unknown status
-	 */
-	uint32_t cmd_flags;
+
+	cmd_flags_t cmd_flags;
 };
 
 struct qla_tgt_sess_work_param {
@@ -1148,7 +1157,7 @@
 extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
-extern void qlt_abort_cmd(struct qla_tgt_cmd *);
+extern int qlt_abort_cmd(struct qla_tgt_cmd *);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index ddbe2e7..c3e6225 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -395,6 +395,10 @@
 	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
 		for (i = 0; i < vha->hw->max_req_queues; i++) {
 			struct req_que *req = vha->hw->req_q_map[i];
+
+			if (!test_bit(i, vha->hw->req_qid_map))
+				continue;
+
 			if (req || !buf) {
 				length = req ?
 				    req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@
 	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+			if (!test_bit(i, vha->hw->rsp_qid_map))
+				continue;
+
 			if (rsp || !buf) {
 				length = rsp ?
 				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@
 	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
 		for (i = 0; i < vha->hw->max_req_queues; i++) {
 			struct req_que *req = vha->hw->req_q_map[i];
+
+			if (!test_bit(i, vha->hw->req_qid_map))
+				continue;
+
 			if (req || !buf) {
 				qla27xx_insert16(i, buf, len);
 				qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@
 	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+			if (!test_bit(i, vha->hw->rsp_qid_map))
+				continue;
+
 			if (rsp || !buf) {
 				qla27xx_insert16(i, buf, len);
 				qla27xx_insert16(1, buf, len);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index faf0a12..1808a01 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -298,6 +298,10 @@
 {
 	cmd->vha->tgt_counters.core_qla_free_cmd++;
 	cmd->cmd_in_wq = 1;
+
+	BUG_ON(cmd->cmd_flags & BIT_20);
+	cmd->cmd_flags |= BIT_20;
+
 	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
 	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
@@ -374,6 +378,20 @@
 {
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
+
+	if (cmd->aborted) {
+		/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+		 * can get ahead of this cmd; tcm_qla2xxx_aborted_task
+		 * has already kick-started the free.
+		 */
+		pr_debug("write_pending aborted cmd[%p] refcount %d "
+			"transport_state %x, t_state %x, se_cmd_flags %x\n",
+			cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+			cmd->se_cmd.transport_state,
+			cmd->se_cmd.t_state,
+			cmd->se_cmd.se_cmd_flags);
+		return 0;
+	}
 	cmd->cmd_flags |= BIT_3;
 	cmd->bufflen = se_cmd->data_length;
 	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +423,7 @@
 	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
 		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
-					    3 * HZ);
+						50);
 		return 0;
 	}
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -444,6 +462,9 @@
 	if (bidi)
 		flags |= TARGET_SCF_BIDI_OP;
 
+	if (se_cmd->cpuid != WORK_CPU_UNBOUND)
+		flags |= TARGET_SCF_USE_CPUID;
+
 	sess = cmd->sess;
 	if (!sess) {
 		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -465,13 +486,25 @@
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+	unsigned long flags;
 
 	/*
 	 * Ensure that the complete FCP WRITE payload has been received.
 	 * Otherwise return an exception via CHECK_CONDITION status.
 	 */
 	cmd->cmd_in_wq = 0;
-	cmd->cmd_flags |= BIT_11;
+
+	spin_lock_irqsave(&cmd->cmd_lock, flags);
+	cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
+	if (cmd->aborted) {
+		cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+		tcm_qla2xxx_free_cmd(cmd);
+		return;
+	}
+	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
 	cmd->vha->tgt_counters.qla_core_ret_ctio++;
 	if (!cmd->write_data_transferred) {
 		/*
@@ -546,6 +579,20 @@
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
 
+	if (cmd->aborted) {
+		/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+		 * can get ahead of this cmd; tcm_qla2xxx_aborted_task
+		 * has already kick-started the free.
+		 */
+		pr_debug("queue_data_in aborted cmd[%p] refcount %d "
+			"transport_state %x, t_state %x, se_cmd_flags %x\n",
+			cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+			cmd->se_cmd.transport_state,
+			cmd->se_cmd.t_state,
+			cmd->se_cmd.se_cmd_flags);
+		return 0;
+	}
+
 	cmd->cmd_flags |= BIT_4;
 	cmd->bufflen = se_cmd->data_length;
 	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -637,11 +684,34 @@
 	qlt_xmit_tm_rsp(mcmd);
 }
 
+
+#define DATA_WORK_NOT_FREE(_flags) \
+	(((_flags) & (CMD_FLAG_DATA_WORK | CMD_FLAG_DATA_WORK_FREE)) == \
+	 CMD_FLAG_DATA_WORK)
 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
-	qlt_abort_cmd(cmd);
+	unsigned long flags;
+
+	if (qlt_abort_cmd(cmd))
+		return;
+
+	spin_lock_irqsave(&cmd->cmd_lock, flags);
+	if ((cmd->state == QLA_TGT_STATE_NEW) ||
+		((cmd->state == QLA_TGT_STATE_DATA_IN) &&
+		 DATA_WORK_NOT_FREE(cmd->cmd_flags))) {
+
+		cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+		/* Cmd has not reached the firmware.
+		 * Use this trigger to free it. */
+		tcm_qla2xxx_free_cmd(cmd);
+		return;
+	}
+	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+	return;
+
 }
 
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 47b9d13..bbfbfd9 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -205,6 +205,8 @@
 	{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
 	{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
 	{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+	{"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
+	{"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES},
 	{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
 	{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4f18a85..00bc7218 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1272,16 +1272,18 @@
 void scsi_remove_target(struct device *dev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
-	struct scsi_target *starget;
+	struct scsi_target *starget, *last_target = NULL;
 	unsigned long flags;
 
 restart:
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(starget, &shost->__targets, siblings) {
-		if (starget->state == STARGET_DEL)
+		if (starget->state == STARGET_DEL ||
+		    starget == last_target)
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
 			kref_get(&starget->reap_ref);
+			last_target = starget;
 			spin_unlock_irqrestore(shost->host_lock, flags);
 			__scsi_remove_target(starget);
 			scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bb669d3..d749da7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -761,7 +761,7 @@
 		break;
 
 	default:
-		ret = BLKPREP_KILL;
+		ret = BLKPREP_INVALID;
 		goto out;
 	}
 
@@ -839,7 +839,7 @@
 	int ret;
 
 	if (sdkp->device->no_write_same)
-		return BLKPREP_KILL;
+		return BLKPREP_INVALID;
 
 	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 503ab8b..5e82067 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1261,7 +1261,7 @@
 	}
 
 	sfp->mmap_called = 1;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
 	return 0;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 55627d0..292c04e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -42,6 +42,7 @@
 #include <scsi/scsi_devinfo.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
 
 /*
  * All wire protocol details (storage protocol between the guest and the host)
@@ -477,19 +478,18 @@
 struct storvsc_scan_work {
 	struct work_struct work;
 	struct Scsi_Host *host;
-	uint lun;
+	u8 lun;
+	u8 tgt_id;
 };
 
 static void storvsc_device_scan(struct work_struct *work)
 {
 	struct storvsc_scan_work *wrk;
-	uint lun;
 	struct scsi_device *sdev;
 
 	wrk = container_of(work, struct storvsc_scan_work, work);
-	lun = wrk->lun;
 
-	sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
 	if (!sdev)
 		goto done;
 	scsi_rescan_device(&sdev->sdev_gendev);
@@ -540,7 +540,7 @@
 	if (!scsi_host_get(wrk->host))
 		goto done;
 
-	sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
 
 	if (sdev) {
 		scsi_remove_device(sdev);
@@ -940,6 +940,7 @@
 
 	wrk->host = host;
 	wrk->lun = vm_srb->lun;
+	wrk->tgt_id = vm_srb->target_id;
 	INIT_WORK(&wrk->work, process_err_fn);
 	schedule_work(&wrk->work);
 }
@@ -1770,6 +1771,11 @@
 	fc_transport_template = fc_attach_transport(&fc_transport_functions);
 	if (!fc_transport_template)
 		return -ENODEV;
+
+	/*
+	 * Install Hyper-V specific timeout handler.
+	 */
+	fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
 #endif
 
 	ret = vmbus_driver_register(&storvsc_drv);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36..8feac59 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@
 
 	as->use_cs_gpios = true;
 	if (atmel_spi_is_v2(as) &&
+	    pdev->dev.of_node &&
 	    !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
 		as->use_cs_gpios = false;
 		master->num_chipselect = 4;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f84..ecc73c0 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
 
 /* Bitfields in CNTL1 */
 #define BCM2835_AUX_SPI_CNTL1_CSHIGH	0x00000700
-#define BCM2835_AUX_SPI_CNTL1_IDLE	0x00000080
-#define BCM2835_AUX_SPI_CNTL1_TXEMPTY	0x00000040
+#define BCM2835_AUX_SPI_CNTL1_TXEMPTY	0x00000080
+#define BCM2835_AUX_SPI_CNTL1_IDLE	0x00000040
 #define BCM2835_AUX_SPI_CNTL1_MSBF_IN	0x00000002
 #define BCM2835_AUX_SPI_CNTL1_KEEP_IN	0x00000001
 
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c..7cb0c19 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@
 /* SPCOM register values */
 #define SPCOM_CS(x)		((x) << 30)
 #define SPCOM_TRANLEN(x)	((x) << 0)
-#define	SPCOM_TRANLEN_MAX	0xFFFF	/* Max transaction length */
+#define	SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */
 
 #define AUTOSUSPEND_TIMEOUT 2000
 
@@ -233,7 +233,7 @@
 	reinit_completion(&mpc8xxx_spi->done);
 
 	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
-	if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
+	if (t->len > SPCOM_TRANLEN_MAX) {
 		dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
 				" beyond the SPCOM[TRANLEN] field\n", t->len);
 		return -EINVAL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33c..6a4ff27 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -929,7 +929,7 @@
 					tx->sgl, tx->nents, DMA_MEM_TO_DEV,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (!desc_tx)
-			goto no_dma;
+			goto tx_nodma;
 
 		desc_tx->callback = spi_imx_dma_tx_callback;
 		desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +941,7 @@
 					rx->sgl, rx->nents, DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (!desc_rx)
-			goto no_dma;
+			goto rx_nodma;
 
 		desc_rx->callback = spi_imx_dma_rx_callback;
 		desc_rx->callback_param = (void *)spi_imx;
@@ -1008,7 +1008,9 @@
 
 	return ret;
 
-no_dma:
+rx_nodma:
+	dmaengine_terminate_all(master->dma_tx);
+tx_nodma:
 	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
 		     dev_driver_string(&master->dev),
 		     dev_name(&master->dev));
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f..cf4bb36 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@
 		test.iterate_transfer_mask = 1;
 
 	/* count number of transfers with tx/rx_buf != NULL */
+	rx_count = tx_count = 0;
 	for (i = 0; i < test.transfer_count; i++) {
 		if (test.transfers[i].tx_buf)
 			tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820..0caa3c8 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@
 	return status;
 
 disable_pm:
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 free_master:
 	spi_master_put(master);
@@ -1501,6 +1503,7 @@
 	struct spi_master *master = platform_get_drvdata(pdev);
 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
 
+	pm_runtime_dont_use_autosuspend(mcspi->dev);
 	pm_runtime_put_sync(mcspi->dev);
 	pm_runtime_disable(&pdev->dev);
 
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 58d4517..b9519be 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -6,6 +6,7 @@
 config AD7606
 	tristate "Analog Devices AD7606 ADC driver"
 	depends on GPIOLIB || COMPILE_TEST
+	depends on HAS_IOMEM
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 	help
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index f129039..6928710 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -217,8 +217,12 @@
 static int ade7753_reset(struct device *dev)
 {
 	u16 val;
+	int ret;
 
-	ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+	ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+	if (ret)
+		return ret;
+
 	val |= BIT(6); /* Software Chip Reset */
 
 	return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
@@ -343,8 +347,12 @@
 static int ade7753_stop_device(struct device *dev)
 {
 	u16 val;
+	int ret;
 
-	ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+	ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+	if (ret)
+		return ret;
+
 	val |= BIT(4);  /* AD converters can be turned off */
 
 	return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
index ba87650..f1f3eca 100644
--- a/drivers/staging/rdma/Kconfig
+++ b/drivers/staging/rdma/Kconfig
@@ -22,12 +22,6 @@
 # Please keep entries in alphabetic order
 if STAGING_RDMA
 
-source "drivers/staging/rdma/amso1100/Kconfig"
-
-source "drivers/staging/rdma/ehca/Kconfig"
-
 source "drivers/staging/rdma/hfi1/Kconfig"
 
-source "drivers/staging/rdma/ipath/Kconfig"
-
 endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
index 139d78e..8c7fc1d 100644
--- a/drivers/staging/rdma/Makefile
+++ b/drivers/staging/rdma/Makefile
@@ -1,5 +1,2 @@
 # Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_AMSO1100)	+= amso1100/
-obj-$(CONFIG_INFINIBAND_EHCA)	+= ehca/
 obj-$(CONFIG_INFINIBAND_HFI1)	+= hfi1/
-obj-$(CONFIG_INFINIBAND_IPATH)	+= ipath/
diff --git a/drivers/staging/rdma/amso1100/Kbuild b/drivers/staging/rdma/amso1100/Kbuild
deleted file mode 100644
index 950dfab..0000000
--- a/drivers/staging/rdma/amso1100/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
-
-obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
-
-iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
-	c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/staging/rdma/amso1100/Kconfig b/drivers/staging/rdma/amso1100/Kconfig
deleted file mode 100644
index e6ce5f2..0000000
--- a/drivers/staging/rdma/amso1100/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-config INFINIBAND_AMSO1100
-	tristate "Ammasso 1100 HCA support"
-	depends on PCI && INET
-	---help---
-	  This is a low-level driver for the Ammasso 1100 host
-	  channel adapter (HCA).
-
-config INFINIBAND_AMSO1100_DEBUG
-	bool "Verbose debugging output"
-	depends on INFINIBAND_AMSO1100
-	default n
-	---help---
-	  This option causes the amso1100 driver to produce a bunch of
-	  debug messages.  Select this if you are developing the driver
-	  or trying to diagnose a problem.
diff --git a/drivers/staging/rdma/amso1100/TODO b/drivers/staging/rdma/amso1100/TODO
deleted file mode 100644
index 18b00a5..0000000
--- a/drivers/staging/rdma/amso1100/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-7/2015
-
-The amso1100 driver has been deprecated and moved to drivers/staging.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
deleted file mode 100644
index b46ebd1..0000000
--- a/drivers/staging/rdma/amso1100/c2.c
+++ /dev/null
@@ -1,1240 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_provider.h"
-
-MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
-MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
-    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-
-static int debug = -1;		/* defaults above */
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-static int c2_up(struct net_device *netdev);
-static int c2_down(struct net_device *netdev);
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-static void c2_tx_interrupt(struct net_device *netdev);
-static void c2_rx_interrupt(struct net_device *netdev);
-static irqreturn_t c2_interrupt(int irq, void *dev_id);
-static void c2_tx_timeout(struct net_device *netdev);
-static int c2_change_mtu(struct net_device *netdev, int new_mtu);
-static void c2_reset(struct c2_port *c2_port);
-
-static struct pci_device_id c2_pci_table[] = {
-	{ PCI_DEVICE(0x18b8, 0xb001) },
-	{ 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, c2_pci_table);
-
-static void c2_set_rxbufsize(struct c2_port *c2_port)
-{
-	struct net_device *netdev = c2_port->netdev;
-
-	if (netdev->mtu > RX_BUF_SIZE)
-		c2_port->rx_buf_size =
-		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
-		    NET_IP_ALIGN;
-	else
-		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
-}
-
-/*
- * Allocate TX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
-			    dma_addr_t base, void __iomem * mmio_txp_ring)
-{
-	struct c2_tx_desc *tx_desc;
-	struct c2_txp_desc __iomem *txp_desc;
-	struct c2_element *elem;
-	int i;
-
-	tx_ring->start = kmalloc_array(tx_ring->count, sizeof(*elem),
-				       GFP_KERNEL);
-	if (!tx_ring->start)
-		return -ENOMEM;
-
-	elem = tx_ring->start;
-	tx_desc = vaddr;
-	txp_desc = mmio_txp_ring;
-	for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
-		tx_desc->len = 0;
-		tx_desc->status = 0;
-
-		/* Set TXP_HTXD_UNINIT */
-		__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
-			     (void __iomem *) txp_desc + C2_TXP_ADDR);
-		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
-		__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
-			     (void __iomem *) txp_desc + C2_TXP_FLAGS);
-
-		elem->skb = NULL;
-		elem->ht_desc = tx_desc;
-		elem->hw_desc = txp_desc;
-
-		if (i == tx_ring->count - 1) {
-			elem->next = tx_ring->start;
-			tx_desc->next_offset = base;
-		} else {
-			elem->next = elem + 1;
-			tx_desc->next_offset =
-			    base + (i + 1) * sizeof(*tx_desc);
-		}
-	}
-
-	tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
-
-	return 0;
-}
-
-/*
- * Allocate RX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
-			    dma_addr_t base, void __iomem * mmio_rxp_ring)
-{
-	struct c2_rx_desc *rx_desc;
-	struct c2_rxp_desc __iomem *rxp_desc;
-	struct c2_element *elem;
-	int i;
-
-	rx_ring->start = kmalloc_array(rx_ring->count, sizeof(*elem),
-				       GFP_KERNEL);
-	if (!rx_ring->start)
-		return -ENOMEM;
-
-	elem = rx_ring->start;
-	rx_desc = vaddr;
-	rxp_desc = mmio_rxp_ring;
-	for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
-		rx_desc->len = 0;
-		rx_desc->status = 0;
-
-		/* Set RXP_HRXD_UNINIT */
-		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
-		       (void __iomem *) rxp_desc + C2_RXP_STATUS);
-		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
-		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
-		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
-			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
-		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
-			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);
-
-		elem->skb = NULL;
-		elem->ht_desc = rx_desc;
-		elem->hw_desc = rxp_desc;
-
-		if (i == rx_ring->count - 1) {
-			elem->next = rx_ring->start;
-			rx_desc->next_offset = base;
-		} else {
-			elem->next = elem + 1;
-			rx_desc->next_offset =
-			    base + (i + 1) * sizeof(*rx_desc);
-		}
-	}
-
-	rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
-
-	return 0;
-}
-
-/* Setup buffer for receiving */
-static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
-{
-	struct c2_dev *c2dev = c2_port->c2dev;
-	struct c2_rx_desc *rx_desc = elem->ht_desc;
-	struct sk_buff *skb;
-	dma_addr_t mapaddr;
-	u32 maplen;
-	struct c2_rxp_hdr *rxp_hdr;
-
-	skb = dev_alloc_skb(c2_port->rx_buf_size);
-	if (unlikely(!skb)) {
-		pr_debug("%s: out of memory for receive\n",
-			c2_port->netdev->name);
-		return -ENOMEM;
-	}
-
-	/* Zero out the rxp hdr in the sk_buff */
-	memset(skb->data, 0, sizeof(*rxp_hdr));
-
-	skb->dev = c2_port->netdev;
-
-	maplen = c2_port->rx_buf_size;
-	mapaddr =
-	    pci_map_single(c2dev->pcidev, skb->data, maplen,
-			   PCI_DMA_FROMDEVICE);
-
-	/* Set the sk_buff RXP_header to RXP_HRXD_READY */
-	rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-	rxp_hdr->flags = RXP_HRXD_READY;
-
-	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-	__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
-		     elem->hw_desc + C2_RXP_LEN);
-	__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
-	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-		     elem->hw_desc + C2_RXP_FLAGS);
-
-	elem->skb = skb;
-	elem->mapaddr = mapaddr;
-	elem->maplen = maplen;
-	rx_desc->len = maplen;
-
-	return 0;
-}
-
-/*
- * Allocate buffers for the Rx ring
- * For receive:  rx_ring.to_clean is next received frame
- */
-static int c2_rx_fill(struct c2_port *c2_port)
-{
-	struct c2_ring *rx_ring = &c2_port->rx_ring;
-	struct c2_element *elem;
-	int ret = 0;
-
-	elem = rx_ring->start;
-	do {
-		if (c2_rx_alloc(c2_port, elem)) {
-			ret = 1;
-			break;
-		}
-	} while ((elem = elem->next) != rx_ring->start);
-
-	rx_ring->to_clean = rx_ring->start;
-	return ret;
-}
-
-/* Free all buffers in RX ring, assumes receiver stopped */
-static void c2_rx_clean(struct c2_port *c2_port)
-{
-	struct c2_dev *c2dev = c2_port->c2dev;
-	struct c2_ring *rx_ring = &c2_port->rx_ring;
-	struct c2_element *elem;
-	struct c2_rx_desc *rx_desc;
-
-	elem = rx_ring->start;
-	do {
-		rx_desc = elem->ht_desc;
-		rx_desc->len = 0;
-
-		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
-		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
-			     elem->hw_desc + C2_RXP_ADDR);
-		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
-			     elem->hw_desc + C2_RXP_FLAGS);
-
-		if (elem->skb) {
-			pci_unmap_single(c2dev->pcidev, elem->mapaddr,
-					 elem->maplen, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(elem->skb);
-			elem->skb = NULL;
-		}
-	} while ((elem = elem->next) != rx_ring->start);
-}
-
-static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
-{
-	struct c2_tx_desc *tx_desc = elem->ht_desc;
-
-	tx_desc->len = 0;
-
-	pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
-			 PCI_DMA_TODEVICE);
-
-	if (elem->skb) {
-		dev_kfree_skb_any(elem->skb);
-		elem->skb = NULL;
-	}
-
-	return 0;
-}
-
-/* Free all buffers in TX ring, assumes transmitter stopped */
-static void c2_tx_clean(struct c2_port *c2_port)
-{
-	struct c2_ring *tx_ring = &c2_port->tx_ring;
-	struct c2_element *elem;
-	struct c2_txp_desc txp_htxd;
-	int retry;
-	unsigned long flags;
-
-	spin_lock_irqsave(&c2_port->tx_lock, flags);
-
-	elem = tx_ring->start;
-
-	do {
-		retry = 0;
-		do {
-			txp_htxd.flags =
-			    readw(elem->hw_desc + C2_TXP_FLAGS);
-
-			if (txp_htxd.flags == TXP_HTXD_READY) {
-				retry = 1;
-				__raw_writew(0,
-					     elem->hw_desc + C2_TXP_LEN);
-				__raw_writeq(0,
-					     elem->hw_desc + C2_TXP_ADDR);
-				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
-					     elem->hw_desc + C2_TXP_FLAGS);
-				c2_port->netdev->stats.tx_dropped++;
-				break;
-			} else {
-				__raw_writew(0,
-					     elem->hw_desc + C2_TXP_LEN);
-				__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
-					     elem->hw_desc + C2_TXP_ADDR);
-				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
-					     elem->hw_desc + C2_TXP_FLAGS);
-			}
-
-			c2_tx_free(c2_port->c2dev, elem);
-
-		} while ((elem = elem->next) != tx_ring->start);
-	} while (retry);
-
-	c2_port->tx_avail = c2_port->tx_ring.count - 1;
-	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
-
-	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
-		netif_wake_queue(c2_port->netdev);
-
-	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-}
-
-/*
- * Process transmit descriptors marked 'DONE' by the firmware,
- * freeing up their unneeded sk_buffs.
- */
-static void c2_tx_interrupt(struct net_device *netdev)
-{
-	struct c2_port *c2_port = netdev_priv(netdev);
-	struct c2_dev *c2dev = c2_port->c2dev;
-	struct c2_ring *tx_ring = &c2_port->tx_ring;
-	struct c2_element *elem;
-	struct c2_txp_desc txp_htxd;
-
-	spin_lock(&c2_port->tx_lock);
-
-	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
-	     elem = elem->next) {
-		txp_htxd.flags =
-		    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
-
-		if (txp_htxd.flags != TXP_HTXD_DONE)
-			break;
-
-		if (netif_msg_tx_done(c2_port)) {
-			/* PCI reads are expensive in fast path */
-			txp_htxd.len =
-			    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
-			pr_debug("%s: tx done slot %3Zu status 0x%x len "
-				"%5u bytes\n",
-				netdev->name, elem - tx_ring->start,
-				txp_htxd.flags, txp_htxd.len);
-		}
-
-		c2_tx_free(c2dev, elem);
-		++(c2_port->tx_avail);
-	}
-
-	tx_ring->to_clean = elem;
-
-	if (netif_queue_stopped(netdev)
-	    && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
-		netif_wake_queue(netdev);
-
-	spin_unlock(&c2_port->tx_lock);
-}
-
-static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
-{
-	struct c2_rx_desc *rx_desc = elem->ht_desc;
-	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-
-	if (rxp_hdr->status != RXP_HRXD_OK ||
-	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
-		pr_debug("BAD RXP_HRXD\n");
-		pr_debug("  rx_desc : %p\n", rx_desc);
-		pr_debug("    index : %Zu\n",
-			elem - c2_port->rx_ring.start);
-		pr_debug("    len   : %u\n", rx_desc->len);
-		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
-			(void *) __pa((unsigned long) rxp_hdr));
-		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
-		pr_debug("    status: 0x%x\n", rxp_hdr->status);
-		pr_debug("    len   : %u\n", rxp_hdr->len);
-		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
-	}
-
-	/* Set up the skb for reuse since we're dropping this pkt */
-	elem->skb->data = elem->skb->head;
-	skb_reset_tail_pointer(elem->skb);
-
-	/* Zero out the rxp hdr in the sk_buff */
-	memset(elem->skb->data, 0, sizeof(*rxp_hdr));
-
-	/* Write the descriptor to the adapter's rx ring */
-	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-	__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
-		     elem->hw_desc + C2_RXP_LEN);
-	__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
-		     elem->hw_desc + C2_RXP_ADDR);
-	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-		     elem->hw_desc + C2_RXP_FLAGS);
-
-	pr_debug("packet dropped\n");
-	c2_port->netdev->stats.rx_dropped++;
-}
-
-static void c2_rx_interrupt(struct net_device *netdev)
-{
-	struct c2_port *c2_port = netdev_priv(netdev);
-	struct c2_dev *c2dev = c2_port->c2dev;
-	struct c2_ring *rx_ring = &c2_port->rx_ring;
-	struct c2_element *elem;
-	struct c2_rx_desc *rx_desc;
-	struct c2_rxp_hdr *rxp_hdr;
-	struct sk_buff *skb;
-	dma_addr_t mapaddr;
-	u32 maplen, buflen;
-	unsigned long flags;
-
-	spin_lock_irqsave(&c2dev->lock, flags);
-
-	/* Begin where we left off */
-	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
-
-	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
-	     elem = elem->next) {
-		rx_desc = elem->ht_desc;
-		mapaddr = elem->mapaddr;
-		maplen = elem->maplen;
-		skb = elem->skb;
-		rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-
-		if (rxp_hdr->flags != RXP_HRXD_DONE)
-			break;
-		buflen = rxp_hdr->len;
-
-		/* Sanity check the RXP header */
-		if (rxp_hdr->status != RXP_HRXD_OK ||
-		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
-			c2_rx_error(c2_port, elem);
-			continue;
-		}
-
-		/*
-		 * Allocate and map a new skb for replenishing the host
-		 * RX desc
-		 */
-		if (c2_rx_alloc(c2_port, elem)) {
-			c2_rx_error(c2_port, elem);
-			continue;
-		}
-
-		/* Unmap the old skb */
-		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
-				 PCI_DMA_FROMDEVICE);
-
-		prefetch(skb->data);
-
-		/*
-		 * Skip past the leading 8 bytes of the
-		 * "struct c2_rxp_hdr", which the adapter prepends
-		 * to the usual Ethernet header ("struct ethhdr"),
-		 * to reach the start of the raw Ethernet packet.
-		 *
-		 * Fix up the various fields in the sk_buff before
-		 * passing it up to netif_rx(). The transfer size
-		 * (in bytes) specified by the adapter len field of
-		 * the "struct rxp_hdr_t" does NOT include the
-		 * "sizeof(struct c2_rxp_hdr)".
-		 */
-		skb->data += sizeof(*rxp_hdr);
-		skb_set_tail_pointer(skb, buflen);
-		skb->len = buflen;
-		skb->protocol = eth_type_trans(skb, netdev);
-
-		netif_rx(skb);
-
-		netdev->stats.rx_packets++;
-		netdev->stats.rx_bytes += buflen;
-	}
-
-	/* Save where we left off */
-	rx_ring->to_clean = elem;
-	c2dev->cur_rx = elem - rx_ring->start;
-	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
-	spin_unlock_irqrestore(&c2dev->lock, flags);
-}
-
-/*
- * Handle netisr0 TX & RX interrupts.
- */
-static irqreturn_t c2_interrupt(int irq, void *dev_id)
-{
-	unsigned int netisr0, dmaisr;
-	int handled = 0;
-	struct c2_dev *c2dev = dev_id;
-
-	/* Process CCILNET interrupts */
-	netisr0 = readl(c2dev->regs + C2_NISR0);
-	if (netisr0) {
-
-		/*
-		 * There is an issue with the firmware that always
-		 * provides the status of RX for both TX & RX
-		 * interrupts.  So process both queues here.
-		 */
-		c2_rx_interrupt(c2dev->netdev);
-		c2_tx_interrupt(c2dev->netdev);
-
-		/* Clear the interrupt */
-		writel(netisr0, c2dev->regs + C2_NISR0);
-		handled++;
-	}
-
-	/* Process RNIC interrupts */
-	dmaisr = readl(c2dev->regs + C2_DISR);
-	if (dmaisr) {
-		writel(dmaisr, c2dev->regs + C2_DISR);
-		c2_rnic_interrupt(c2dev);
-		handled++;
-	}
-
-	if (handled) {
-		return IRQ_HANDLED;
-	} else {
-		return IRQ_NONE;
-	}
-}
-
-static int c2_up(struct net_device *netdev)
-{
-	struct c2_port *c2_port = netdev_priv(netdev);
-	struct c2_dev *c2dev = c2_port->c2dev;
-	struct c2_element *elem;
-	struct c2_rxp_hdr *rxp_hdr;
-	struct in_device *in_dev;
-	size_t rx_size, tx_size;
-	int ret, i;
-	unsigned int netimr0;
-
-	if (netif_msg_ifup(c2_port))
-		pr_debug("%s: enabling interface\n", netdev->name);
-
-	/* Set the Rx buffer size based on MTU */
-	c2_set_rxbufsize(c2_port);
-
-	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
-	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
-	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
-
-	c2_port->mem_size = tx_size + rx_size;
-	c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
-					     &c2_port->dma);
-	if (c2_port->mem == NULL) {
-		pr_debug("Unable to allocate memory for "
-			"host descriptor rings\n");
-		return -ENOMEM;
-	}
-
-	/* Create the Rx host descriptor ring */
-	if ((ret =
-	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
-			      c2dev->mmio_rxp_ring))) {
-		pr_debug("Unable to create RX ring\n");
-		goto bail0;
-	}
-
-	/* Allocate Rx buffers for the host descriptor ring */
-	if (c2_rx_fill(c2_port)) {
-		pr_debug("Unable to fill RX ring\n");
-		goto bail1;
-	}
-
-	/* Create the Tx host descriptor ring */
-	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
-				    c2_port->dma + rx_size,
-				    c2dev->mmio_txp_ring))) {
-		pr_debug("Unable to create TX ring\n");
-		goto bail1;
-	}
-
-	/* Set the TX pointer to where we left off */
-	c2_port->tx_avail = c2_port->tx_ring.count - 1;
-	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
-	    c2_port->tx_ring.start + c2dev->cur_tx;
-
-	/* missing: Initialize MAC */
-
-	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
-
-	/* Reset the adapter, ensures the driver is in sync with the RXP */
-	c2_reset(c2_port);
-
-	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
-	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
-	     i++, elem++) {
-		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-		rxp_hdr->flags = 0;
-		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-			     elem->hw_desc + C2_RXP_FLAGS);
-	}
-
-	/* Enable network packets */
-	netif_start_queue(netdev);
-
-	/* Enable IRQ */
-	writel(0, c2dev->regs + C2_IDIS);
-	netimr0 = readl(c2dev->regs + C2_NIMR0);
-	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
-	writel(netimr0, c2dev->regs + C2_NIMR0);
-
-	/* Tell the stack to ignore arp requests for ipaddrs bound to
-	 * other interfaces.  This is needed to prevent the host stack
-	 * from responding to arp requests to the ipaddr bound on the
-	 * rdma interface.
-	 */
-	in_dev = in_dev_get(netdev);
-	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
-	in_dev_put(in_dev);
-
-	return 0;
-
-bail1:
-	c2_rx_clean(c2_port);
-	kfree(c2_port->rx_ring.start);
-
-bail0:
-	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
-			    c2_port->dma);
-
-	return ret;
-}
-
-static int c2_down(struct net_device *netdev)
-{
-	struct c2_port *c2_port = netdev_priv(netdev);
-	struct c2_dev *c2dev = c2_port->c2dev;
-
-	if (netif_msg_ifdown(c2_port))
-		pr_debug("%s: disabling interface\n",
-			netdev->name);
-
-	/* Wait for all the queued packets to get sent */
-	c2_tx_interrupt(netdev);
-
-	/* Disable network packets */
-	netif_stop_queue(netdev);
-
-	/* Disable IRQs by clearing the interrupt mask */
-	writel(1, c2dev->regs + C2_IDIS);
-	writel(0, c2dev->regs + C2_NIMR0);
-
-	/* missing: Stop transmitter */
-
-	/* missing: Stop receiver */
-
-	/* Reset the adapter, ensures the driver is in sync with the RXP */
-	c2_reset(c2_port);
-
-	/* missing: Turn off LEDs here */
-
-	/* Free all buffers in the host descriptor rings */
-	c2_tx_clean(c2_port);
-	c2_rx_clean(c2_port);
-
-	/* Free the host descriptor rings */
-	kfree(c2_port->rx_ring.start);
-	kfree(c2_port->tx_ring.start);
-	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
-			    c2_port->dma);
-
-	return 0;
-}
-
-static void c2_reset(struct c2_port *c2_port)
-{
-	struct c2_dev *c2dev = c2_port->c2dev;
-	unsigned int cur_rx = c2dev->cur_rx;
-
-	/* Tell the hardware to quiesce */
-	C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
-
-	/*
-	 * The hardware will reset the C2_PCI_HRX_QUI bit once
-	 * the RXP is quiesced.  Wait 2 seconds for this.
-	 */
-	ssleep(2);
-
-	cur_rx = C2_GET_CUR_RX(c2dev);
-
-	if (cur_rx & C2_PCI_HRX_QUI)
-		pr_debug("c2_reset: failed to quiesce the hardware!\n");
-
-	cur_rx &= ~C2_PCI_HRX_QUI;
-
-	c2dev->cur_rx = cur_rx;
-
-	pr_debug("Current RX: %u\n", c2dev->cur_rx);
-}
-
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-	struct c2_port *c2_port = netdev_priv(netdev);
-	struct c2_dev *c2dev = c2_port->c2dev;
-	struct c2_ring *tx_ring = &c2_port->tx_ring;
-	struct c2_element *elem;
-	dma_addr_t mapaddr;
-	u32 maplen;
-	unsigned long flags;
-	unsigned int i;
-
-	spin_lock_irqsave(&c2_port->tx_lock, flags);
-
-	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
-		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
-		pr_debug("%s: Tx ring full when queue awake!\n",
-			netdev->name);
-		return NETDEV_TX_BUSY;
-	}
-
-	maplen = skb_headlen(skb);
-	mapaddr =
-	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
-
-	elem = tx_ring->to_use;
-	elem->skb = skb;
-	elem->mapaddr = mapaddr;
-	elem->maplen = maplen;
-
-	/* Tell HW to xmit */
-	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
-		     elem->hw_desc + C2_TXP_ADDR);
-	__raw_writew((__force u16) cpu_to_be16(maplen),
-		     elem->hw_desc + C2_TXP_LEN);
-	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
-		     elem->hw_desc + C2_TXP_FLAGS);
-
-	netdev->stats.tx_packets++;
-	netdev->stats.tx_bytes += maplen;
-
-	/* Loop through additional data fragments and queue them */
-	if (skb_shinfo(skb)->nr_frags) {
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			maplen = skb_frag_size(frag);
-			mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
-						   0, maplen, DMA_TO_DEVICE);
-			elem = elem->next;
-			elem->skb = NULL;
-			elem->mapaddr = mapaddr;
-			elem->maplen = maplen;
-
-			/* Tell HW to xmit */
-			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
-				     elem->hw_desc + C2_TXP_ADDR);
-			__raw_writew((__force u16) cpu_to_be16(maplen),
-				     elem->hw_desc + C2_TXP_LEN);
-			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
-				     elem->hw_desc + C2_TXP_FLAGS);
-
-			netdev->stats.tx_packets++;
-			netdev->stats.tx_bytes += maplen;
-		}
-	}
-
-	tx_ring->to_use = elem->next;
-	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
-
-	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
-		netif_stop_queue(netdev);
-		if (netif_msg_tx_queued(c2_port))
-			pr_debug("%s: transmit queue full\n",
-				netdev->name);
-	}
-
-	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
-	netdev->trans_start = jiffies;
-
-	return NETDEV_TX_OK;
-}
-
-static void c2_tx_timeout(struct net_device *netdev)
-{
-	struct c2_port *c2_port = netdev_priv(netdev);
-
-	if (netif_msg_timer(c2_port))
-		pr_debug("%s: tx timeout\n", netdev->name);
-
-	c2_tx_clean(c2_port);
-}
-
-static int c2_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	int ret = 0;
-
-	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
-		return -EINVAL;
-
-	netdev->mtu = new_mtu;
-
-	if (netif_running(netdev)) {
-		c2_down(netdev);
-
-		c2_up(netdev);
-	}
-
-	return ret;
-}
-
-static const struct net_device_ops c2_netdev = {
-	.ndo_open 		= c2_up,
-	.ndo_stop 		= c2_down,
-	.ndo_start_xmit		= c2_xmit_frame,
-	.ndo_tx_timeout		= c2_tx_timeout,
-	.ndo_change_mtu		= c2_change_mtu,
-	.ndo_set_mac_address 	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-/* Initialize network device */
-static struct net_device *c2_devinit(struct c2_dev *c2dev,
-				     void __iomem * mmio_addr)
-{
-	struct c2_port *c2_port = NULL;
-	struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
-
-	if (!netdev) {
-		pr_debug("c2_port etherdev alloc failed");
-		return NULL;
-	}
-
-	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
-	netdev->netdev_ops = &c2_netdev;
-	netdev->watchdog_timeo = C2_TX_TIMEOUT;
-	netdev->irq = c2dev->pcidev->irq;
-
-	c2_port = netdev_priv(netdev);
-	c2_port->netdev = netdev;
-	c2_port->c2dev = c2dev;
-	c2_port->msg_enable = netif_msg_init(debug, default_msg);
-	c2_port->tx_ring.count = C2_NUM_TX_DESC;
-	c2_port->rx_ring.count = C2_NUM_RX_DESC;
-
-	spin_lock_init(&c2_port->tx_lock);
-
-	/* Copy our 48-bit ethernet hardware address */
-	memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
-
-	/* Validate the MAC address */
-	if (!is_valid_ether_addr(netdev->dev_addr)) {
-		pr_debug("Invalid MAC Address\n");
-		pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name,
-			 netdev->dev_addr, netdev->irq);
-		free_netdev(netdev);
-		return NULL;
-	}
-
-	c2dev->netdev = netdev;
-
-	return netdev;
-}
-
-static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
-{
-	int ret = 0, i;
-	unsigned long reg0_start, reg0_flags, reg0_len;
-	unsigned long reg2_start, reg2_flags, reg2_len;
-	unsigned long reg4_start, reg4_flags, reg4_len;
-	unsigned kva_map_size;
-	struct net_device *netdev = NULL;
-	struct c2_dev *c2dev = NULL;
-	void __iomem *mmio_regs = NULL;
-
-	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
-		DRV_VERSION);
-
-	/* Enable PCI device */
-	ret = pci_enable_device(pcidev);
-	if (ret) {
-		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
-			pci_name(pcidev));
-		goto bail0;
-	}
-
-	reg0_start = pci_resource_start(pcidev, BAR_0);
-	reg0_len = pci_resource_len(pcidev, BAR_0);
-	reg0_flags = pci_resource_flags(pcidev, BAR_0);
-
-	reg2_start = pci_resource_start(pcidev, BAR_2);
-	reg2_len = pci_resource_len(pcidev, BAR_2);
-	reg2_flags = pci_resource_flags(pcidev, BAR_2);
-
-	reg4_start = pci_resource_start(pcidev, BAR_4);
-	reg4_len = pci_resource_len(pcidev, BAR_4);
-	reg4_flags = pci_resource_flags(pcidev, BAR_4);
-
-	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
-	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
-	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
-
-	/* Make sure the PCI base addresses are MMIO */
-	if (!(reg0_flags & IORESOURCE_MEM) ||
-	    !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
-		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
-		ret = -ENODEV;
-		goto bail1;
-	}
-
-	/* Check for weird/broken PCI region reporting */
-	if ((reg0_len < C2_REG0_SIZE) ||
-	    (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
-		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
-		ret = -ENODEV;
-		goto bail1;
-	}
-
-	/* Reserve PCI I/O and memory resources */
-	ret = pci_request_regions(pcidev, DRV_NAME);
-	if (ret) {
-		printk(KERN_ERR PFX "%s: Unable to request regions\n",
-			pci_name(pcidev));
-		goto bail1;
-	}
-
-	if ((sizeof(dma_addr_t) > 4)) {
-		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
-		if (ret < 0) {
-			printk(KERN_ERR PFX "64b DMA configuration failed\n");
-			goto bail2;
-		}
-	} else {
-		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-		if (ret < 0) {
-			printk(KERN_ERR PFX "32b DMA configuration failed\n");
-			goto bail2;
-		}
-	}
-
-	/* Enables bus-mastering on the device */
-	pci_set_master(pcidev);
-
-	/* Remap the adapter PCI registers in BAR4 */
-	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
-				    sizeof(struct c2_adapter_pci_regs));
-	if (!mmio_regs) {
-		printk(KERN_ERR PFX
-			"Unable to remap adapter PCI registers in BAR4\n");
-		ret = -EIO;
-		goto bail2;
-	}
-
-	/* Validate PCI regs magic */
-	for (i = 0; i < sizeof(c2_magic); i++) {
-		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
-			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
-				"[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
-			       "utility to update your boot loader\n",
-				i + 1, sizeof(c2_magic),
-				readb(mmio_regs + C2_REGS_MAGIC + i),
-				c2_magic[i]);
-			printk(KERN_ERR PFX "Adapter not claimed\n");
-			iounmap(mmio_regs);
-			ret = -EIO;
-			goto bail2;
-		}
-	}
-
-	/* Validate the adapter version */
-	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
-		printk(KERN_ERR PFX "Version mismatch "
-			"[fw=%u, c2=%u], Adapter not claimed\n",
-			be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
-			C2_VERSION);
-		ret = -EINVAL;
-		iounmap(mmio_regs);
-		goto bail2;
-	}
-
-	/* Validate the adapter IVN */
-	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
-		printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
-		       "the OpenIB device support kit. "
-		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
-		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
-		       C2_IVN);
-		ret = -EINVAL;
-		iounmap(mmio_regs);
-		goto bail2;
-	}
-
-	/* Allocate hardware structure */
-	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
-	if (!c2dev) {
-		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
-			pci_name(pcidev));
-		ret = -ENOMEM;
-		iounmap(mmio_regs);
-		goto bail2;
-	}
-
-	memset(c2dev, 0, sizeof(*c2dev));
-	spin_lock_init(&c2dev->lock);
-	c2dev->pcidev = pcidev;
-	c2dev->cur_tx = 0;
-
-	/* Get the last RX index */
-	c2dev->cur_rx =
-	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
-	     0xffffc000) / sizeof(struct c2_rxp_desc);
-
-	/* Request an interrupt line for the driver */
-	ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
-	if (ret) {
-		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
-			pci_name(pcidev), pcidev->irq);
-		iounmap(mmio_regs);
-		goto bail3;
-	}
-
-	/* Set driver specific data */
-	pci_set_drvdata(pcidev, c2dev);
-
-	/* Initialize network device */
-	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
-		ret = -ENOMEM;
-		iounmap(mmio_regs);
-		goto bail4;
-	}
-
-	/* Save off the actual size prior to unmapping mmio_regs */
-	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
-
-	/* Unmap the adapter PCI registers in BAR4 */
-	iounmap(mmio_regs);
-
-	/* Register network device */
-	ret = register_netdev(netdev);
-	if (ret) {
-		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
-			ret);
-		goto bail5;
-	}
-
-	/* Disable network packets */
-	netif_stop_queue(netdev);
-
-	/* Remap the adapter HRXDQ PA space to kernel VA space */
-	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
-					       C2_RXP_HRXDQ_SIZE);
-	if (!c2dev->mmio_rxp_ring) {
-		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
-		ret = -EIO;
-		goto bail6;
-	}
-
-	/* Remap the adapter HTXDQ PA space to kernel VA space */
-	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
-					       C2_TXP_HTXDQ_SIZE);
-	if (!c2dev->mmio_txp_ring) {
-		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
-		ret = -EIO;
-		goto bail7;
-	}
-
-	/* Save off the current RX index in the last 4 bytes of the TXP Ring */
-	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
-	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
-	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
-	if (!c2dev->regs) {
-		printk(KERN_ERR PFX "Unable to remap BAR0\n");
-		ret = -EIO;
-		goto bail8;
-	}
-
-	/* Remap the PCI registers in adapter BAR4 to kernel VA space */
-	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
-	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
-				     kva_map_size);
-	if (!c2dev->kva) {
-		printk(KERN_ERR PFX "Unable to remap BAR4\n");
-		ret = -EIO;
-		goto bail9;
-	}
-
-	/* Print out the MAC address */
-	pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr,
-		 netdev->irq);
-
-	ret = c2_rnic_init(c2dev);
-	if (ret) {
-		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
-		goto bail10;
-	}
-
-	ret = c2_register_device(c2dev);
-	if (ret)
-		goto bail10;
-
-	return 0;
-
- bail10:
-	iounmap(c2dev->kva);
-
- bail9:
-	iounmap(c2dev->regs);
-
- bail8:
-	iounmap(c2dev->mmio_txp_ring);
-
- bail7:
-	iounmap(c2dev->mmio_rxp_ring);
-
- bail6:
-	unregister_netdev(netdev);
-
- bail5:
-	free_netdev(netdev);
-
- bail4:
-	free_irq(pcidev->irq, c2dev);
-
- bail3:
-	ib_dealloc_device(&c2dev->ibdev);
-
- bail2:
-	pci_release_regions(pcidev);
-
- bail1:
-	pci_disable_device(pcidev);
-
- bail0:
-	return ret;
-}
-
-static void c2_remove(struct pci_dev *pcidev)
-{
-	struct c2_dev *c2dev = pci_get_drvdata(pcidev);
-	struct net_device *netdev = c2dev->netdev;
-
-	/* Unregister with OpenIB */
-	c2_unregister_device(c2dev);
-
-	/* Clean up the RNIC resources */
-	c2_rnic_term(c2dev);
-
-	/* Remove network device from the kernel */
-	unregister_netdev(netdev);
-
-	/* Free network device */
-	free_netdev(netdev);
-
-	/* Free the interrupt line */
-	free_irq(pcidev->irq, c2dev);
-
-	/* missing: Turn LEDs off here */
-
-	/* Unmap adapter PA space */
-	iounmap(c2dev->kva);
-	iounmap(c2dev->regs);
-	iounmap(c2dev->mmio_txp_ring);
-	iounmap(c2dev->mmio_rxp_ring);
-
-	/* Free the hardware structure */
-	ib_dealloc_device(&c2dev->ibdev);
-
-	/* Release reserved PCI I/O and memory resources */
-	pci_release_regions(pcidev);
-
-	/* Disable PCI device */
-	pci_disable_device(pcidev);
-
-	/* Clear driver specific data */
-	pci_set_drvdata(pcidev, NULL);
-}
-
-static struct pci_driver c2_pci_driver = {
-	.name = DRV_NAME,
-	.id_table = c2_pci_table,
-	.probe = c2_probe,
-	.remove = c2_remove,
-};
-
-module_pci_driver(c2_pci_driver);
diff --git a/drivers/staging/rdma/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
deleted file mode 100644
index 21b565a..0000000
--- a/drivers/staging/rdma/amso1100/c2.h
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __C2_H
-#define __C2_H
-
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-
-#include "c2_provider.h"
-#include "c2_mq.h"
-#include "c2_status.h"
-
-#define DRV_NAME     "c2"
-#define DRV_VERSION  "1.1"
-#define PFX          DRV_NAME ": "
-
-#define BAR_0                0
-#define BAR_2                2
-#define BAR_4                4
-
-#define RX_BUF_SIZE         (1536 + 8)
-#define ETH_JUMBO_MTU        9000
-#define C2_MAGIC            "CEPHEUS"
-#define C2_VERSION           4
-#define C2_IVN              (18 & 0x7fffffff)
-
-#define C2_REG0_SIZE        (16 * 1024)
-#define C2_REG2_SIZE        (2 * 1024 * 1024)
-#define C2_REG4_SIZE        (256 * 1024 * 1024)
-#define C2_NUM_TX_DESC       341
-#define C2_NUM_RX_DESC       256
-#define C2_PCI_REGS_OFFSET  (0x10000)
-#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
-#define C2_RXP_HRXDQ_SIZE   (4096)
-#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
-#define C2_TXP_HTXDQ_SIZE   (4096)
-#define C2_TX_TIMEOUT	    (6*HZ)
-
-/* CEPHEUS */
-static const u8 c2_magic[] = {
-	0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
-};
-
-enum adapter_pci_regs {
-	C2_REGS_MAGIC = 0x0000,
-	C2_REGS_VERS = 0x0008,
-	C2_REGS_IVN = 0x000C,
-	C2_REGS_PCI_WINSIZE = 0x0010,
-	C2_REGS_Q0_QSIZE = 0x0014,
-	C2_REGS_Q0_MSGSIZE = 0x0018,
-	C2_REGS_Q0_POOLSTART = 0x001C,
-	C2_REGS_Q0_SHARED = 0x0020,
-	C2_REGS_Q1_QSIZE = 0x0024,
-	C2_REGS_Q1_MSGSIZE = 0x0028,
-	C2_REGS_Q1_SHARED = 0x0030,
-	C2_REGS_Q2_QSIZE = 0x0034,
-	C2_REGS_Q2_MSGSIZE = 0x0038,
-	C2_REGS_Q2_SHARED = 0x0040,
-	C2_REGS_ENADDR = 0x004C,
-	C2_REGS_RDMA_ENADDR = 0x0054,
-	C2_REGS_HRX_CUR = 0x006C,
-};
-
-struct c2_adapter_pci_regs {
-	char reg_magic[8];
-	u32 version;
-	u32 ivn;
-	u32 pci_window_size;
-	u32 q0_q_size;
-	u32 q0_msg_size;
-	u32 q0_pool_start;
-	u32 q0_shared;
-	u32 q1_q_size;
-	u32 q1_msg_size;
-	u32 q1_pool_start;
-	u32 q1_shared;
-	u32 q2_q_size;
-	u32 q2_msg_size;
-	u32 q2_pool_start;
-	u32 q2_shared;
-	u32 log_start;
-	u32 log_size;
-	u8 host_enaddr[8];
-	u8 rdma_enaddr[8];
-	u32 crash_entry;
-	u32 crash_ready[2];
-	u32 fw_txd_cur;
-	u32 fw_hrxd_cur;
-	u32 fw_rxd_cur;
-};
-
-enum pci_regs {
-	C2_HISR = 0x0000,
-	C2_DISR = 0x0004,
-	C2_HIMR = 0x0008,
-	C2_DIMR = 0x000C,
-	C2_NISR0 = 0x0010,
-	C2_NISR1 = 0x0014,
-	C2_NIMR0 = 0x0018,
-	C2_NIMR1 = 0x001C,
-	C2_IDIS = 0x0020,
-};
-
-enum {
-	C2_PCI_HRX_INT = 1 << 8,
-	C2_PCI_HTX_INT = 1 << 17,
-	C2_PCI_HRX_QUI = 1 << 31,
-};
-
-/*
- * Cepheus registers in BAR0.
- */
-struct c2_pci_regs {
-	u32 hostisr;
-	u32 dmaisr;
-	u32 hostimr;
-	u32 dmaimr;
-	u32 netisr0;
-	u32 netisr1;
-	u32 netimr0;
-	u32 netimr1;
-	u32 int_disable;
-};
-
-/* TXP flags */
-enum c2_txp_flags {
-	TXP_HTXD_DONE = 0,
-	TXP_HTXD_READY = 1 << 0,
-	TXP_HTXD_UNINIT = 1 << 1,
-};
-
-/* RXP flags */
-enum c2_rxp_flags {
-	RXP_HRXD_UNINIT = 0,
-	RXP_HRXD_READY = 1 << 0,
-	RXP_HRXD_DONE = 1 << 1,
-};
-
-/* RXP status */
-enum c2_rxp_status {
-	RXP_HRXD_ZERO = 0,
-	RXP_HRXD_OK = 1 << 0,
-	RXP_HRXD_BUF_OV = 1 << 1,
-};
-
-/* TXP descriptor fields */
-enum txp_desc {
-	C2_TXP_FLAGS = 0x0000,
-	C2_TXP_LEN = 0x0002,
-	C2_TXP_ADDR = 0x0004,
-};
-
-/* RXP descriptor fields */
-enum rxp_desc {
-	C2_RXP_FLAGS = 0x0000,
-	C2_RXP_STATUS = 0x0002,
-	C2_RXP_COUNT = 0x0004,
-	C2_RXP_LEN = 0x0006,
-	C2_RXP_ADDR = 0x0008,
-};
-
-struct c2_txp_desc {
-	u16 flags;
-	u16 len;
-	u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_desc {
-	u16 flags;
-	u16 status;
-	u16 count;
-	u16 len;
-	u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_hdr {
-	u16 flags;
-	u16 status;
-	u16 len;
-	u16 rsvd;
-} __attribute__ ((packed));
-
-struct c2_tx_desc {
-	u32 len;
-	u32 status;
-	dma_addr_t next_offset;
-};
-
-struct c2_rx_desc {
-	u32 len;
-	u32 status;
-	dma_addr_t next_offset;
-};
-
-struct c2_alloc {
-	u32 last;
-	u32 max;
-	spinlock_t lock;
-	unsigned long *table;
-};
-
-struct c2_array {
-	struct {
-		void **page;
-		int used;
-	} *page_list;
-};
-
-/*
- * The MQ shared pointer pool is organized as a linked list of
- * chunks. Each chunk contains a linked list of free shared pointers
- * that can be allocated to a given user mode client.
- *
- */
-struct sp_chunk {
-	struct sp_chunk *next;
-	dma_addr_t dma_addr;
-	DEFINE_DMA_UNMAP_ADDR(mapping);
-	u16 head;
-	u16 shared_ptr[0];
-};
-
-struct c2_pd_table {
-	u32 last;
-	u32 max;
-	spinlock_t lock;
-	unsigned long *table;
-};
-
-struct c2_qp_table {
-	struct idr idr;
-	spinlock_t lock;
-};
-
-struct c2_element {
-	struct c2_element *next;
-	void *ht_desc;		/* host     descriptor */
-	void __iomem *hw_desc;	/* hardware descriptor */
-	struct sk_buff *skb;
-	dma_addr_t mapaddr;
-	u32 maplen;
-};
-
-struct c2_ring {
-	struct c2_element *to_clean;
-	struct c2_element *to_use;
-	struct c2_element *start;
-	unsigned long count;
-};
-
-struct c2_dev {
-	struct ib_device ibdev;
-	void __iomem *regs;
-	void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
-	void __iomem *mmio_rxp_ring;
-	spinlock_t lock;
-	struct pci_dev *pcidev;
-	struct net_device *netdev;
-	struct net_device *pseudo_netdev;
-	unsigned int cur_tx;
-	unsigned int cur_rx;
-	u32 adapter_handle;
-	int device_cap_flags;
-	void __iomem *kva;	/* KVA device memory */
-	unsigned long pa;	/* PA device memory */
-	void **qptr_array;
-
-	struct kmem_cache *host_msg_cache;
-
-	struct list_head cca_link;		/* adapter list */
-	struct list_head eh_wakeup_list;	/* event wakeup list */
-	wait_queue_head_t req_vq_wo;
-
-	/* Cached RNIC properties */
-	struct ib_device_attr props;
-
-	struct c2_pd_table pd_table;
-	struct c2_qp_table qp_table;
-	int ports;		/* num of GigE ports */
-	int devnum;
-	spinlock_t vqlock;	/* sync vbs req MQ */
-
-	/* Verbs Queues */
-	struct c2_mq req_vq;	/* Verbs Request MQ */
-	struct c2_mq rep_vq;	/* Verbs Reply MQ */
-	struct c2_mq aeq;	/* Async Events MQ */
-
-	/* Kernel client MQs */
-	struct sp_chunk *kern_mqsp_pool;
-
-	/* Device updates these values when posting messages to a host
-	 * target queue */
-	u16 req_vq_shared;
-	u16 rep_vq_shared;
-	u16 aeq_shared;
-	u16 irq_claimed;
-
-	/*
-	 * Shared host target pages for user-accessible MQs.
-	 */
-	int hthead;		/* index of first free entry */
-	void *htpages;		/* kernel vaddr */
-	int htlen;		/* length of htpages memory */
-	void *htuva;		/* user mapped vaddr */
-	spinlock_t htlock;	/* serialize allocation */
-
-	u64 adapter_hint_uva;	/* access to the activity FIFO */
-
-	//	spinlock_t aeq_lock;
-	//	spinlock_t rnic_lock;
-
-	__be16 *hint_count;
-	dma_addr_t hint_count_dma;
-	u16 hints_read;
-
-	int init;		/* TRUE if it's ready */
-	char ae_cache_name[16];
-	char vq_cache_name[16];
-};
-
-struct c2_port {
-	u32 msg_enable;
-	struct c2_dev *c2dev;
-	struct net_device *netdev;
-
-	spinlock_t tx_lock;
-	u32 tx_avail;
-	struct c2_ring tx_ring;
-	struct c2_ring rx_ring;
-
-	void *mem;		/* PCI memory for host rings */
-	dma_addr_t dma;
-	unsigned long mem_size;
-
-	u32 rx_buf_size;
-};
-
-/*
- * Activity FIFO registers in BAR0.
- */
-#define PCI_BAR0_HOST_HINT	0x100
-#define PCI_BAR0_ADAPTER_HINT	0x2000
-
-/*
- * Completion queue (CQ) arming flags.
- */
-#define CQ_ARMED 	0x01
-#define CQ_WAIT_FOR_DMA	0x80
-
-/*
- * The format of a hint is as follows:
- * Lower 16 bits are the count of hints for the queue.
- * Next 15 bits are the qp_index.
- * Uppermost bit depends on who reads it:
- *    If read by producer, then it means Full (1) or Not-Full (0)
- *    If read by consumer, then it means Empty (1) or Not-Empty (0)
- */
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-
-/*
- * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t
- * struct.
- */
-#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
-
-#ifndef readq
-static inline u64 readq(const void __iomem * addr)
-{
-	u64 ret = readl(addr + 4);
-	ret <<= 32;
-	ret |= readl(addr);
-
-	return ret;
-}
-#endif
-
-#ifndef writeq
-static inline void __raw_writeq(u64 val, void __iomem * addr)
-{
-	__raw_writel((u32) (val), addr);
-	__raw_writel((u32) (val >> 32), (addr + 4));
-}
-#endif
-
-#define C2_SET_CUR_RX(c2dev, cur_rx) \
-	__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
-
-#define C2_GET_CUR_RX(c2dev) \
-	be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
-
-static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
-{
-	return container_of(ibdev, struct c2_dev, ibdev);
-}
-
-static inline int c2_errno(void *reply)
-{
-	switch (c2_wr_get_result(reply)) {
-	case C2_OK:
-		return 0;
-	case CCERR_NO_BUFS:
-	case CCERR_INSUFFICIENT_RESOURCES:
-	case CCERR_ZERO_RDMA_READ_RESOURCES:
-		return -ENOMEM;
-	case CCERR_MR_IN_USE:
-	case CCERR_QP_IN_USE:
-		return -EBUSY;
-	case CCERR_ADDR_IN_USE:
-		return -EADDRINUSE;
-	case CCERR_ADDR_NOT_AVAIL:
-		return -EADDRNOTAVAIL;
-	case CCERR_CONN_RESET:
-		return -ECONNRESET;
-	case CCERR_NOT_IMPLEMENTED:
-	case CCERR_INVALID_WQE:
-		return -ENOSYS;
-	case CCERR_QP_NOT_PRIVILEGED:
-		return -EPERM;
-	case CCERR_STACK_ERROR:
-		return -EPROTO;
-	case CCERR_ACCESS_VIOLATION:
-	case CCERR_BASE_AND_BOUNDS_VIOLATION:
-		return -EFAULT;
-	case CCERR_STAG_STATE_NOT_INVALID:
-	case CCERR_INVALID_ADDRESS:
-	case CCERR_INVALID_CQ:
-	case CCERR_INVALID_EP:
-	case CCERR_INVALID_MODIFIER:
-	case CCERR_INVALID_MTU:
-	case CCERR_INVALID_PD_ID:
-	case CCERR_INVALID_QP:
-	case CCERR_INVALID_RNIC:
-	case CCERR_INVALID_STAG:
-		return -EINVAL;
-	default:
-		return -EAGAIN;
-	}
-}
-
-/* Device */
-int c2_register_device(struct c2_dev *c2dev);
-void c2_unregister_device(struct c2_dev *c2dev);
-int c2_rnic_init(struct c2_dev *c2dev);
-void c2_rnic_term(struct c2_dev *c2dev);
-void c2_rnic_interrupt(struct c2_dev *c2dev);
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-
-/* QPs */
-int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
-		       struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
-			struct ib_qp_attr *attr, int attr_mask);
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
-				 int ord, int ird);
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
-			struct ib_send_wr **bad_wr);
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
-			   struct ib_recv_wr **bad_wr);
-void c2_init_qp_table(struct c2_dev *c2dev);
-void c2_cleanup_qp_table(struct c2_dev *c2dev);
-void c2_set_qp_state(struct c2_qp *, int);
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
-
-/* PDs */
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-int c2_init_pd_table(struct c2_dev *c2dev);
-void c2_cleanup_pd_table(struct c2_dev *c2dev);
-
-/* CQs */
-int c2_init_cq(struct c2_dev *c2dev, int entries,
-		      struct c2_ucontext *ctx, struct c2_cq *cq);
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-
-/* CM */
-int c2_llp_connect(struct iw_cm_id *cm_id,
-			  struct iw_cm_conn_param *iw_param);
-int c2_llp_accept(struct iw_cm_id *cm_id,
-			 struct iw_cm_conn_param *iw_param);
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
-			 u8 pdata_len);
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
-int c2_llp_service_destroy(struct iw_cm_id *cm_id);
-
-/* MM */
-int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
- 				      int page_size, int pbl_depth, u32 length,
- 				      u32 off, u64 *va, enum c2_acf acf,
-				      struct c2_mr *mr);
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
-
-/* AE */
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
-
-/* MQSP Allocator */
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
-			     struct sp_chunk **root);
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-			     dma_addr_t *dma_addr, gfp_t gfp_mask);
-void c2_free_mqsp(__be16* mqsp);
-#endif
diff --git a/drivers/staging/rdma/amso1100/c2_ae.c b/drivers/staging/rdma/amso1100/c2_ae.c
deleted file mode 100644
index eb7a92b..0000000
--- a/drivers/staging/rdma/amso1100/c2_ae.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_status.h"
-#include "c2_ae.h"
-
-static int c2_convert_cm_status(u32 c2_status)
-{
-	switch (c2_status) {
-	case C2_CONN_STATUS_SUCCESS:
-		return 0;
-	case C2_CONN_STATUS_REJECTED:
-		return -ENETRESET;
-	case C2_CONN_STATUS_REFUSED:
-		return -ECONNREFUSED;
-	case C2_CONN_STATUS_TIMEDOUT:
-		return -ETIMEDOUT;
-	case C2_CONN_STATUS_NETUNREACH:
-		return -ENETUNREACH;
-	case C2_CONN_STATUS_HOSTUNREACH:
-		return -EHOSTUNREACH;
-	case C2_CONN_STATUS_INVALID_RNIC:
-		return -EINVAL;
-	case C2_CONN_STATUS_INVALID_QP:
-		return -EINVAL;
-	case C2_CONN_STATUS_INVALID_QP_STATE:
-		return -EINVAL;
-	case C2_CONN_STATUS_ADDR_NOT_AVAIL:
-		return -EADDRNOTAVAIL;
-	default:
-		printk(KERN_ERR PFX
-		       "%s - Unable to convert CM status: %d\n",
-		       __func__, c2_status);
-		return -EIO;
-	}
-}
-
-static const char* to_event_str(int event)
-{
-	static const char* event_str[] = {
-		"CCAE_REMOTE_SHUTDOWN",
-		"CCAE_ACTIVE_CONNECT_RESULTS",
-		"CCAE_CONNECTION_REQUEST",
-		"CCAE_LLP_CLOSE_COMPLETE",
-		"CCAE_TERMINATE_MESSAGE_RECEIVED",
-		"CCAE_LLP_CONNECTION_RESET",
-		"CCAE_LLP_CONNECTION_LOST",
-		"CCAE_LLP_SEGMENT_SIZE_INVALID",
-		"CCAE_LLP_INVALID_CRC",
-		"CCAE_LLP_BAD_FPDU",
-		"CCAE_INVALID_DDP_VERSION",
-		"CCAE_INVALID_RDMA_VERSION",
-		"CCAE_UNEXPECTED_OPCODE",
-		"CCAE_INVALID_DDP_QUEUE_NUMBER",
-		"CCAE_RDMA_READ_NOT_ENABLED",
-		"CCAE_RDMA_WRITE_NOT_ENABLED",
-		"CCAE_RDMA_READ_TOO_SMALL",
-		"CCAE_NO_L_BIT",
-		"CCAE_TAGGED_INVALID_STAG",
-		"CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
-		"CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
-		"CCAE_TAGGED_INVALID_PD",
-		"CCAE_WRAP_ERROR",
-		"CCAE_BAD_CLOSE",
-		"CCAE_BAD_LLP_CLOSE",
-		"CCAE_INVALID_MSN_RANGE",
-		"CCAE_INVALID_MSN_GAP",
-		"CCAE_IRRQ_OVERFLOW",
-		"CCAE_IRRQ_MSN_GAP",
-		"CCAE_IRRQ_MSN_RANGE",
-		"CCAE_IRRQ_INVALID_STAG",
-		"CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
-		"CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
-		"CCAE_IRRQ_INVALID_PD",
-		"CCAE_IRRQ_WRAP_ERROR",
-		"CCAE_CQ_SQ_COMPLETION_OVERFLOW",
-		"CCAE_CQ_RQ_COMPLETION_ERROR",
-		"CCAE_QP_SRQ_WQE_ERROR",
-		"CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
-		"CCAE_CQ_OVERFLOW",
-		"CCAE_CQ_OPERATION_ERROR",
-		"CCAE_SRQ_LIMIT_REACHED",
-		"CCAE_QP_RQ_LIMIT_REACHED",
-		"CCAE_SRQ_CATASTROPHIC_ERROR",
-		"CCAE_RNIC_CATASTROPHIC_ERROR"
-	};
-
-	if (event < CCAE_REMOTE_SHUTDOWN ||
-	    event > CCAE_RNIC_CATASTROPHIC_ERROR)
-		return "<invalid event>";
-
-	event -= CCAE_REMOTE_SHUTDOWN;
-	return event_str[event];
-}
-
-static const char *to_qp_state_str(int state)
-{
-	switch (state) {
-	case C2_QP_STATE_IDLE:
-		return "C2_QP_STATE_IDLE";
-	case C2_QP_STATE_CONNECTING:
-		return "C2_QP_STATE_CONNECTING";
-	case C2_QP_STATE_RTS:
-		return "C2_QP_STATE_RTS";
-	case C2_QP_STATE_CLOSING:
-		return "C2_QP_STATE_CLOSING";
-	case C2_QP_STATE_TERMINATE:
-		return "C2_QP_STATE_TERMINATE";
-	case C2_QP_STATE_ERROR:
-		return "C2_QP_STATE_ERROR";
-	default:
-		return "<invalid QP state>";
-	}
-}
-
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
-{
-	struct c2_mq *mq = c2dev->qptr_array[mq_index];
-	union c2wr *wr;
-	void *resource_user_context;
-	struct iw_cm_event cm_event;
-	struct ib_event ib_event;
-	enum c2_resource_indicator resource_indicator;
-	enum c2_event_id event_id;
-	unsigned long flags;
-	int status;
-	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
-	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
-
-	/*
-	 * retrieve the message
-	 */
-	wr = c2_mq_consume(mq);
-	if (!wr)
-		return;
-
-	memset(&ib_event, 0, sizeof(ib_event));
-	memset(&cm_event, 0, sizeof(cm_event));
-
-	event_id = c2_wr_get_id(wr);
-	resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
-	resource_user_context =
-	    (void *) (unsigned long) wr->ae.ae_generic.user_context;
-
-	status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
-
-	pr_debug("event received c2_dev=%p, event_id=%d, "
-		"resource_indicator=%d, user_context=%p, status = %d\n",
-		c2dev, event_id, resource_indicator, resource_user_context,
-		status);
-
-	switch (resource_indicator) {
-	case C2_RES_IND_QP:{
-
-		struct c2_qp *qp = resource_user_context;
-		struct iw_cm_id *cm_id = qp->cm_id;
-		struct c2wr_ae_active_connect_results *res;
-
-		if (!cm_id) {
-			pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
-				qp);
-			goto ignore_it;
-		}
-		pr_debug("%s: event = %s, user_context=%llx, "
-			"resource_type=%x, "
-			"resource=%x, qp_state=%s\n",
-			__func__,
-			to_event_str(event_id),
-			(unsigned long long) wr->ae.ae_generic.user_context,
-			be32_to_cpu(wr->ae.ae_generic.resource_type),
-			be32_to_cpu(wr->ae.ae_generic.resource),
-			to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
-
-		c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
-
-		switch (event_id) {
-		case CCAE_ACTIVE_CONNECT_RESULTS:
-			res = &wr->ae.ae_active_connect_results;
-			cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-			laddr->sin_addr.s_addr = res->laddr;
-			raddr->sin_addr.s_addr = res->raddr;
-			laddr->sin_port = res->lport;
-			raddr->sin_port = res->rport;
-			if (status == 0) {
-				cm_event.private_data_len =
-					be32_to_cpu(res->private_data_length);
-				cm_event.private_data = res->private_data;
-			} else {
-				spin_lock_irqsave(&qp->lock, flags);
-				if (qp->cm_id) {
-					qp->cm_id->rem_ref(qp->cm_id);
-					qp->cm_id = NULL;
-				}
-				spin_unlock_irqrestore(&qp->lock, flags);
-				cm_event.private_data_len = 0;
-				cm_event.private_data = NULL;
-			}
-			if (cm_id->event_handler)
-				cm_id->event_handler(cm_id, &cm_event);
-			break;
-		case CCAE_TERMINATE_MESSAGE_RECEIVED:
-		case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
-			ib_event.device = &c2dev->ibdev;
-			ib_event.element.qp = &qp->ibqp;
-			ib_event.event = IB_EVENT_QP_REQ_ERR;
-
-			if (qp->ibqp.event_handler)
-				qp->ibqp.event_handler(&ib_event,
-						       qp->ibqp.
-						       qp_context);
-			break;
-		case CCAE_BAD_CLOSE:
-		case CCAE_LLP_CLOSE_COMPLETE:
-		case CCAE_LLP_CONNECTION_RESET:
-		case CCAE_LLP_CONNECTION_LOST:
-			BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
-
-			spin_lock_irqsave(&qp->lock, flags);
-			if (qp->cm_id) {
-				qp->cm_id->rem_ref(qp->cm_id);
-				qp->cm_id = NULL;
-			}
-			spin_unlock_irqrestore(&qp->lock, flags);
-			cm_event.event = IW_CM_EVENT_CLOSE;
-			cm_event.status = 0;
-			if (cm_id->event_handler)
-				cm_id->event_handler(cm_id, &cm_event);
-			break;
-		default:
-			BUG_ON(1);
-			pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
-				"CM_ID=%p\n",
-				__func__, __LINE__,
-				event_id, qp, cm_id);
-			break;
-		}
-		break;
-	}
-
-	case C2_RES_IND_EP:{
-
-		struct c2wr_ae_connection_request *req =
-			&wr->ae.ae_connection_request;
-		struct iw_cm_id *cm_id =
-			resource_user_context;
-
-		pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
-		if (event_id != CCAE_CONNECTION_REQUEST) {
-			pr_debug("%s: Invalid event_id: %d\n",
-				__func__, event_id);
-			break;
-		}
-		cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
-		cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
-		laddr->sin_addr.s_addr = req->laddr;
-		raddr->sin_addr.s_addr = req->raddr;
-		laddr->sin_port = req->lport;
-		raddr->sin_port = req->rport;
-		cm_event.private_data_len =
-			be32_to_cpu(req->private_data_length);
-		cm_event.private_data = req->private_data;
-		/*
-		 * Until ird/ord negotiation via MPAv2 support is added, send
-		 * max supported values
-		 */
-		cm_event.ird = cm_event.ord = 128;
-
-		if (cm_id->event_handler)
-			cm_id->event_handler(cm_id, &cm_event);
-		break;
-	}
-
-	case C2_RES_IND_CQ:{
-		struct c2_cq *cq =
-		    resource_user_context;
-
-		pr_debug("IB_EVENT_CQ_ERR\n");
-		ib_event.device = &c2dev->ibdev;
-		ib_event.element.cq = &cq->ibcq;
-		ib_event.event = IB_EVENT_CQ_ERR;
-
-		if (cq->ibcq.event_handler)
-			cq->ibcq.event_handler(&ib_event,
-					       cq->ibcq.cq_context);
-		break;
-	}
-
-	default:
-		printk("Bad resource indicator = %d\n",
-		       resource_indicator);
-		break;
-	}
-
- ignore_it:
-	c2_mq_free(mq);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_ae.h b/drivers/staging/rdma/amso1100/c2_ae.h
deleted file mode 100644
index 3a065c3..0000000
--- a/drivers/staging/rdma/amso1100/c2_ae.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_AE_H_
-#define _C2_AE_H_
-
-/*
- * WARNING: If you change this file, also bump C2_IVN_BASE
- * in common/include/clustercore/c2_ivn.h.
- */
-
-/*
- * Asynchronous Event Identifiers
- *
- * These start at 0x80 only so it's obvious from inspection that
- * they are not work-request statuses.  This isn't critical.
- *
- * NOTE: these event id's must fit in eight bits.
- */
-enum c2_event_id {
-	CCAE_REMOTE_SHUTDOWN = 0x80,
-	CCAE_ACTIVE_CONNECT_RESULTS,
-	CCAE_CONNECTION_REQUEST,
-	CCAE_LLP_CLOSE_COMPLETE,
-	CCAE_TERMINATE_MESSAGE_RECEIVED,
-	CCAE_LLP_CONNECTION_RESET,
-	CCAE_LLP_CONNECTION_LOST,
-	CCAE_LLP_SEGMENT_SIZE_INVALID,
-	CCAE_LLP_INVALID_CRC,
-	CCAE_LLP_BAD_FPDU,
-	CCAE_INVALID_DDP_VERSION,
-	CCAE_INVALID_RDMA_VERSION,
-	CCAE_UNEXPECTED_OPCODE,
-	CCAE_INVALID_DDP_QUEUE_NUMBER,
-	CCAE_RDMA_READ_NOT_ENABLED,
-	CCAE_RDMA_WRITE_NOT_ENABLED,
-	CCAE_RDMA_READ_TOO_SMALL,
-	CCAE_NO_L_BIT,
-	CCAE_TAGGED_INVALID_STAG,
-	CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
-	CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
-	CCAE_TAGGED_INVALID_PD,
-	CCAE_WRAP_ERROR,
-	CCAE_BAD_CLOSE,
-	CCAE_BAD_LLP_CLOSE,
-	CCAE_INVALID_MSN_RANGE,
-	CCAE_INVALID_MSN_GAP,
-	CCAE_IRRQ_OVERFLOW,
-	CCAE_IRRQ_MSN_GAP,
-	CCAE_IRRQ_MSN_RANGE,
-	CCAE_IRRQ_INVALID_STAG,
-	CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
-	CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
-	CCAE_IRRQ_INVALID_PD,
-	CCAE_IRRQ_WRAP_ERROR,
-	CCAE_CQ_SQ_COMPLETION_OVERFLOW,
-	CCAE_CQ_RQ_COMPLETION_ERROR,
-	CCAE_QP_SRQ_WQE_ERROR,
-	CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
-	CCAE_CQ_OVERFLOW,
-	CCAE_CQ_OPERATION_ERROR,
-	CCAE_SRQ_LIMIT_REACHED,
-	CCAE_QP_RQ_LIMIT_REACHED,
-	CCAE_SRQ_CATASTROPHIC_ERROR,
-	CCAE_RNIC_CATASTROPHIC_ERROR
-/* WARNING If you add more id's, make sure their values fit in eight bits. */
-};
-
-/*
- * Resource Indicators and Identifiers
- */
-enum c2_resource_indicator {
-	C2_RES_IND_QP = 1,
-	C2_RES_IND_EP,
-	C2_RES_IND_CQ,
-	C2_RES_IND_SRQ,
-};
-
-#endif /* _C2_AE_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_alloc.c b/drivers/staging/rdma/amso1100/c2_alloc.c
deleted file mode 100644
index 039872d..0000000
--- a/drivers/staging/rdma/amso1100/c2_alloc.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/bitmap.h>
-
-#include "c2.h"
-
-static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
-			       struct sp_chunk **head)
-{
-	int i;
-	struct sp_chunk *new_head;
-	dma_addr_t dma_addr;
-
-	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
-				      &dma_addr, gfp_mask);
-	if (new_head == NULL)
-		return -ENOMEM;
-
-	new_head->dma_addr = dma_addr;
-	dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
-
-	new_head->next = NULL;
-	new_head->head = 0;
-
-	/* build list where each index is the next free slot */
-	for (i = 0;
-	     i < (PAGE_SIZE - sizeof(struct sp_chunk) -
-		  sizeof(u16)) / sizeof(u16) - 1;
-	     i++) {
-		new_head->shared_ptr[i] = i + 1;
-	}
-	/* terminate list */
-	new_head->shared_ptr[i] = 0xFFFF;
-
-	*head = new_head;
-	return 0;
-}
-
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
-		      struct sp_chunk **root)
-{
-	return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
-}
-
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
-{
-	struct sp_chunk *next;
-
-	while (root) {
-		next = root->next;
-		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
-				  dma_unmap_addr(root, mapping));
-		root = next;
-	}
-}
-
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-		      dma_addr_t *dma_addr, gfp_t gfp_mask)
-{
-	u16 mqsp;
-
-	while (head) {
-		mqsp = head->head;
-		if (mqsp != 0xFFFF) {
-			head->head = head->shared_ptr[mqsp];
-			break;
-		} else if (head->next == NULL) {
-			if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
-			    0) {
-				head = head->next;
-				mqsp = head->head;
-				head->head = head->shared_ptr[mqsp];
-				break;
-			} else
-				return NULL;
-		} else
-			head = head->next;
-	}
-	if (head) {
-		*dma_addr = head->dma_addr +
-			    ((unsigned long) &(head->shared_ptr[mqsp]) -
-			     (unsigned long) head);
-		pr_debug("%s addr %p dma_addr %llx\n", __func__,
-			 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
-		return (__force __be16 *) &(head->shared_ptr[mqsp]);
-	}
-	return NULL;
-}
-
-void c2_free_mqsp(__be16 *mqsp)
-{
-	struct sp_chunk *head;
-	u16 idx;
-
-	/* The chunk containing this ptr begins at the page boundary */
-	head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
-
-	/* Link head to new mqsp */
-	*mqsp = (__force __be16) head->head;
-
-	/* Compute the shared_ptr index */
-	idx = (offset_in_page(mqsp)) >> 1;
-	idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
-
-	/* Point this index at the head */
-	head->shared_ptr[idx] = head->head;
-
-	/* Point head at this index */
-	head->head = idx;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_cm.c b/drivers/staging/rdma/amso1100/c2_cm.c
deleted file mode 100644
index f8dbdb9..0000000
--- a/drivers/staging/rdma/amso1100/c2_cm.c
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_vq.h"
-#include <rdma/iw_cm.h>
-
-int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-	struct c2_dev *c2dev = to_c2dev(cm_id->device);
-	struct ib_qp *ibqp;
-	struct c2_qp *qp;
-	struct c2wr_qp_connect_req *wr;	/* variable size needs a malloc. */
-	struct c2_vq_req *vq_req;
-	int err;
-	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
-	if (cm_id->remote_addr.ss_family != AF_INET)
-		return -ENOSYS;
-
-	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
-	if (!ibqp)
-		return -EINVAL;
-	qp = to_c2qp(ibqp);
-
-	/* Associate QP <--> CM_ID */
-	cm_id->provider_data = qp;
-	cm_id->add_ref(cm_id);
-	qp->cm_id = cm_id;
-
-	/*
-	 * only support up to the max private_data length
-	 */
-	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
-		err = -EINVAL;
-		goto bail0;
-	}
-	/*
-	 * Set the rdma read limits
-	 */
-	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
-	if (err)
-		goto bail0;
-
-	/*
-	 * Create and send a WR_QP_CONNECT...
-	 */
-	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-	if (!wr) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	c2_wr_set_id(wr, CCWR_QP_CONNECT);
-	wr->hdr.context = 0;
-	wr->rnic_handle = c2dev->adapter_handle;
-	wr->qp_handle = qp->adapter_handle;
-
-	wr->remote_addr = raddr->sin_addr.s_addr;
-	wr->remote_port = raddr->sin_port;
-
-	/*
-	 * Move any private data from the caller's buf into
-	 * the WR.
-	 */
-	if (iw_param->private_data) {
-		wr->private_data_length =
-			cpu_to_be32(iw_param->private_data_len);
-		memcpy(&wr->private_data[0], iw_param->private_data,
-		       iw_param->private_data_len);
-	} else
-		wr->private_data_length = 0;
-
-	/*
-	 * Send WR to adapter.  NOTE: There is no synchronous reply from
-	 * the adapter.
-	 */
-	err = vq_send_wr(c2dev, (union c2wr *) wr);
-	vq_req_free(c2dev, vq_req);
-
- bail1:
-	kfree(wr);
- bail0:
-	if (err) {
-		/*
-		 * If we fail, release reference on QP and
-		 * disassociate QP from CM_ID
-		 */
-		cm_id->provider_data = NULL;
-		qp->cm_id = NULL;
-		cm_id->rem_ref(cm_id);
-	}
-	return err;
-}
-
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
-{
-	struct c2_dev *c2dev;
-	struct c2wr_ep_listen_create_req wr;
-	struct c2wr_ep_listen_create_rep *reply;
-	struct c2_vq_req *vq_req;
-	int err;
-	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-
-	if (cm_id->local_addr.ss_family != AF_INET)
-		return -ENOSYS;
-
-	c2dev = to_c2dev(cm_id->device);
-	if (c2dev == NULL)
-		return -EINVAL;
-
-	/*
-	 * Allocate verbs request.
-	 */
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	/*
-	 * Build the WR
-	 */
-	c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
-	wr.hdr.context = (u64) (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.local_addr = laddr->sin_addr.s_addr;
-	wr.local_port = laddr->sin_port;
-	wr.backlog = cpu_to_be32(backlog);
-	wr.user_context = (u64) (unsigned long) cm_id;
-
-	/*
-	 * Reference the request struct.  Dereferenced in the int handler.
-	 */
-	vq_req_get(c2dev, vq_req);
-
-	/*
-	 * Send WR to adapter
-	 */
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	/*
-	 * Wait for reply from adapter
-	 */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail0;
-
-	/*
-	 * Process reply
-	 */
-	reply =
-	    (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	if ((err = c2_errno(reply)) != 0)
-		goto bail1;
-
-	/*
-	 * Keep the adapter handle. Used in subsequent destroy
-	 */
-	cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
-
-	/*
-	 * free vq stuff
-	 */
-	vq_repbuf_free(c2dev, reply);
-	vq_req_free(c2dev, vq_req);
-
-	return 0;
-
- bail1:
-	vq_repbuf_free(c2dev, reply);
- bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-
-int c2_llp_service_destroy(struct iw_cm_id *cm_id)
-{
-
-	struct c2_dev *c2dev;
-	struct c2wr_ep_listen_destroy_req wr;
-	struct c2wr_ep_listen_destroy_rep *reply;
-	struct c2_vq_req *vq_req;
-	int err;
-
-	c2dev = to_c2dev(cm_id->device);
-	if (c2dev == NULL)
-		return -EINVAL;
-
-	/*
-	 * Allocate verbs request.
-	 */
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	/*
-	 * Build the WR
-	 */
-	c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
-
-	/*
-	 * reference the request struct.  dereferenced in the int handler.
-	 */
-	vq_req_get(c2dev, vq_req);
-
-	/*
-	 * Send WR to adapter
-	 */
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	/*
-	 * Wait for reply from adapter
-	 */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail0;
-
-	/*
-	 * Process reply
-	 */
-	reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	vq_repbuf_free(c2dev, reply);
- bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-	struct c2_dev *c2dev = to_c2dev(cm_id->device);
-	struct c2_qp *qp;
-	struct ib_qp *ibqp;
-	struct c2wr_cr_accept_req *wr;	/* variable length WR */
-	struct c2_vq_req *vq_req;
-	struct c2wr_cr_accept_rep *reply;	/* VQ Reply msg ptr. */
-	int err;
-
-	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
-	if (!ibqp)
-		return -EINVAL;
-	qp = to_c2qp(ibqp);
-
-	/* Set the RDMA read limits */
-	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
-	if (err)
-		goto bail0;
-
-	/* Allocate verbs request. */
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-	vq_req->qp = qp;
-	vq_req->cm_id = cm_id;
-	vq_req->event = IW_CM_EVENT_ESTABLISHED;
-
-	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-	if (!wr) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	/* Build the WR */
-	c2_wr_set_id(wr, CCWR_CR_ACCEPT);
-	wr->hdr.context = (unsigned long) vq_req;
-	wr->rnic_handle = c2dev->adapter_handle;
-	wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
-	wr->qp_handle = qp->adapter_handle;
-
-	/* Replace the cr_handle with the QP after accept */
-	cm_id->provider_data = qp;
-	cm_id->add_ref(cm_id);
-	qp->cm_id = cm_id;
-
-	cm_id->provider_data = qp;
-
-	/* Validate private_data length */
-	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
-		err = -EINVAL;
-		goto bail1;
-	}
-
-	if (iw_param->private_data) {
-		wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
-		memcpy(&wr->private_data[0],
-		       iw_param->private_data, iw_param->private_data_len);
-	} else
-		wr->private_data_length = 0;
-
-	/* Reference the request struct.  Dereferenced in the int handler. */
-	vq_req_get(c2dev, vq_req);
-
-	/* Send WR to adapter */
-	err = vq_send_wr(c2dev, (union c2wr *) wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail1;
-	}
-
-	/* Wait for reply from adapter */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail1;
-
-	/* Check that reply is present */
-	reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	err = c2_errno(reply);
-	vq_repbuf_free(c2dev, reply);
-
-	if (!err)
-		c2_set_qp_state(qp, C2_QP_STATE_RTS);
- bail1:
-	kfree(wr);
-	vq_req_free(c2dev, vq_req);
- bail0:
-	if (err) {
-		/*
-		 * If we fail, release reference on QP and
-		 * disassociate QP from CM_ID
-		 */
-		cm_id->provider_data = NULL;
-		qp->cm_id = NULL;
-		cm_id->rem_ref(cm_id);
-	}
-	return err;
-}
-
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
-	struct c2_dev *c2dev;
-	struct c2wr_cr_reject_req wr;
-	struct c2_vq_req *vq_req;
-	struct c2wr_cr_reject_rep *reply;
-	int err;
-
-	c2dev = to_c2dev(cm_id->device);
-
-	/*
-	 * Allocate verbs request.
-	 */
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	/*
-	 * Build the WR
-	 */
-	c2_wr_set_id(&wr, CCWR_CR_REJECT);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
-
-	/*
-	 * reference the request struct.  dereferenced in the int handler.
-	 */
-	vq_req_get(c2dev, vq_req);
-
-	/*
-	 * Send WR to adapter
-	 */
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	/*
-	 * Wait for reply from adapter
-	 */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail0;
-
-	/*
-	 * Process reply
-	 */
-	reply = (struct c2wr_cr_reject_rep *) (unsigned long)
-		vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-	err = c2_errno(reply);
-	/*
-	 * free vq stuff
-	 */
-	vq_repbuf_free(c2dev, reply);
-
- bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
deleted file mode 100644
index 7ad0c08..0000000
--- a/drivers/staging/rdma/amso1100/c2_cq.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
-
-static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
-{
-	struct c2_cq *cq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&c2dev->lock, flags);
-	cq = c2dev->qptr_array[cqn];
-	if (!cq) {
-		spin_unlock_irqrestore(&c2dev->lock, flags);
-		return NULL;
-	}
-	atomic_inc(&cq->refcount);
-	spin_unlock_irqrestore(&c2dev->lock, flags);
-	return cq;
-}
-
-static void c2_cq_put(struct c2_cq *cq)
-{
-	if (atomic_dec_and_test(&cq->refcount))
-		wake_up(&cq->wait);
-}
-
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
-{
-	struct c2_cq *cq;
-
-	cq = c2_cq_get(c2dev, mq_index);
-	if (!cq) {
-		printk("discarding events on destroyed CQN=%d\n", mq_index);
-		return;
-	}
-
-	(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-	c2_cq_put(cq);
-}
-
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
-{
-	struct c2_cq *cq;
-	struct c2_mq *q;
-
-	cq = c2_cq_get(c2dev, mq_index);
-	if (!cq)
-		return;
-
-	spin_lock_irq(&cq->lock);
-	q = &cq->mq;
-	if (q && !c2_mq_empty(q)) {
-		u16 priv = q->priv;
-		struct c2wr_ce *msg;
-
-		while (priv != be16_to_cpu(*q->shared)) {
-			msg = (struct c2wr_ce *)
-				(q->msg_pool.host + priv * q->msg_size);
-			if (msg->qp_user_context == (u64) (unsigned long) qp) {
-				msg->qp_user_context = (u64) 0;
-			}
-			priv = (priv + 1) % q->q_size;
-		}
-	}
-	spin_unlock_irq(&cq->lock);
-	c2_cq_put(cq);
-}
-
-static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
-{
-	switch (status) {
-	case C2_OK:
-		return IB_WC_SUCCESS;
-	case CCERR_FLUSHED:
-		return IB_WC_WR_FLUSH_ERR;
-	case CCERR_BASE_AND_BOUNDS_VIOLATION:
-		return IB_WC_LOC_PROT_ERR;
-	case CCERR_ACCESS_VIOLATION:
-		return IB_WC_LOC_ACCESS_ERR;
-	case CCERR_TOTAL_LENGTH_TOO_BIG:
-		return IB_WC_LOC_LEN_ERR;
-	case CCERR_INVALID_WINDOW:
-		return IB_WC_MW_BIND_ERR;
-	default:
-		return IB_WC_GENERAL_ERR;
-	}
-}
-
-
-static inline int c2_poll_one(struct c2_dev *c2dev,
-			      struct c2_cq *cq, struct ib_wc *entry)
-{
-	struct c2wr_ce *ce;
-	struct c2_qp *qp;
-	int is_recv = 0;
-
-	ce = c2_mq_consume(&cq->mq);
-	if (!ce) {
-		return -EAGAIN;
-	}
-
-	/*
-	 * if the qp returned is null then this qp has already
-	 * been freed and we are unable to process the completion.
-	 * try pulling the next message
-	 */
-	while ((qp =
-		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
-		c2_mq_free(&cq->mq);
-		ce = c2_mq_consume(&cq->mq);
-		if (!ce)
-			return -EAGAIN;
-	}
-
-	entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
-	entry->wr_id = ce->hdr.context;
-	entry->qp = &qp->ibqp;
-	entry->wc_flags = 0;
-	entry->slid = 0;
-	entry->sl = 0;
-	entry->src_qp = 0;
-	entry->dlid_path_bits = 0;
-	entry->pkey_index = 0;
-
-	switch (c2_wr_get_id(ce)) {
-	case C2_WR_TYPE_SEND:
-		entry->opcode = IB_WC_SEND;
-		break;
-	case C2_WR_TYPE_RDMA_WRITE:
-		entry->opcode = IB_WC_RDMA_WRITE;
-		break;
-	case C2_WR_TYPE_RDMA_READ:
-		entry->opcode = IB_WC_RDMA_READ;
-		break;
-	case C2_WR_TYPE_RECV:
-		entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
-		entry->opcode = IB_WC_RECV;
-		is_recv = 1;
-		break;
-	default:
-		break;
-	}
-
-	/* consume the WQEs */
-	if (is_recv)
-		c2_mq_lconsume(&qp->rq_mq, 1);
-	else
-		c2_mq_lconsume(&qp->sq_mq,
-			       be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
-
-	/* free the message */
-	c2_mq_free(&cq->mq);
-
-	return 0;
-}
-
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
-	struct c2_dev *c2dev = to_c2dev(ibcq->device);
-	struct c2_cq *cq = to_c2cq(ibcq);
-	unsigned long flags;
-	int npolled, err;
-
-	spin_lock_irqsave(&cq->lock, flags);
-
-	for (npolled = 0; npolled < num_entries; ++npolled) {
-
-		err = c2_poll_one(c2dev, cq, entry + npolled);
-		if (err)
-			break;
-	}
-
-	spin_unlock_irqrestore(&cq->lock, flags);
-
-	return npolled;
-}
-
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
-	struct c2_mq_shared __iomem *shared;
-	struct c2_cq *cq;
-	unsigned long flags;
-	int ret = 0;
-
-	cq = to_c2cq(ibcq);
-	shared = cq->mq.peer;
-
-	if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
-		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
-	else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
-		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
-	else
-		return -EINVAL;
-
-	writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
-
-	/*
-	 * Now read back shared->armed to make the PCI
-	 * write synchronous.  This is necessary for
-	 * correct cq notification semantics.
-	 */
-	readb(&shared->armed);
-
-	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
-		spin_lock_irqsave(&cq->lock, flags);
-		ret = !c2_mq_empty(&cq->mq);
-		spin_unlock_irqrestore(&cq->lock, flags);
-	}
-
-	return ret;
-}
-
-static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
-{
-	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
-			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
-}
-
-static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
-			   size_t q_size, size_t msg_size)
-{
-	u8 *pool_start;
-
-	if (q_size > SIZE_MAX / msg_size)
-		return -EINVAL;
-
-	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
-					&mq->host_dma, GFP_KERNEL);
-	if (!pool_start)
-		return -ENOMEM;
-
-	c2_mq_rep_init(mq,
-		       0,		/* index (currently unknown) */
-		       q_size,
-		       msg_size,
-		       pool_start,
-		       NULL,	/* peer (currently unknown) */
-		       C2_MQ_HOST_TARGET);
-
-	dma_unmap_addr_set(mq, mapping, mq->host_dma);
-
-	return 0;
-}
-
-int c2_init_cq(struct c2_dev *c2dev, int entries,
-	       struct c2_ucontext *ctx, struct c2_cq *cq)
-{
-	struct c2wr_cq_create_req wr;
-	struct c2wr_cq_create_rep *reply;
-	unsigned long peer_pa;
-	struct c2_vq_req *vq_req;
-	int err;
-
-	might_sleep();
-
-	cq->ibcq.cqe = entries - 1;
-	cq->is_kernel = !ctx;
-
-	/* Allocate a shared pointer */
-	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-				      &cq->mq.shared_dma, GFP_KERNEL);
-	if (!cq->mq.shared)
-		return -ENOMEM;
-
-	/* Allocate pages for the message pool */
-	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
-	if (err)
-		goto bail0;
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	memset(&wr, 0, sizeof(wr));
-	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
-	wr.depth = cpu_to_be32(cq->mq.q_size);
-	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
-	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
-	wr.user_context = (u64) (unsigned long) (cq);
-
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail2;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail2;
-
-	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail2;
-	}
-
-	if ((err = c2_errno(reply)) != 0)
-		goto bail3;
-
-	cq->adapter_handle = reply->cq_handle;
-	cq->mq.index = be32_to_cpu(reply->mq_index);
-
-	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
-	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
-	if (!cq->mq.peer) {
-		err = -ENOMEM;
-		goto bail3;
-	}
-
-	vq_repbuf_free(c2dev, reply);
-	vq_req_free(c2dev, vq_req);
-
-	spin_lock_init(&cq->lock);
-	atomic_set(&cq->refcount, 1);
-	init_waitqueue_head(&cq->wait);
-
-	/*
-	 * Use the MQ index allocated by the adapter to
-	 * store the CQ in the qptr_array
-	 */
-	cq->cqn = cq->mq.index;
-	c2dev->qptr_array[cq->cqn] = cq;
-
-	return 0;
-
-bail3:
-	vq_repbuf_free(c2dev, reply);
-bail2:
-	vq_req_free(c2dev, vq_req);
-bail1:
-	c2_free_cq_buf(c2dev, &cq->mq);
-bail0:
-	c2_free_mqsp(cq->mq.shared);
-
-	return err;
-}
-
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
-{
-	int err;
-	struct c2_vq_req *vq_req;
-	struct c2wr_cq_destroy_req wr;
-	struct c2wr_cq_destroy_rep *reply;
-
-	might_sleep();
-
-	/* Clear CQ from the qptr array */
-	spin_lock_irq(&c2dev->lock);
-	c2dev->qptr_array[cq->mq.index] = NULL;
-	atomic_dec(&cq->refcount);
-	spin_unlock_irq(&c2dev->lock);
-
-	wait_event(cq->wait, !atomic_read(&cq->refcount));
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req) {
-		goto bail0;
-	}
-
-	memset(&wr, 0, sizeof(wr));
-	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.cq_handle = cq->adapter_handle;
-
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail1;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail1;
-
-	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-	if (reply)
-		vq_repbuf_free(c2dev, reply);
-bail1:
-	vq_req_free(c2dev, vq_req);
-bail0:
-	if (cq->is_kernel) {
-		c2_free_cq_buf(c2dev, &cq->mq);
-	}
-
-	return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_intr.c b/drivers/staging/rdma/amso1100/c2_intr.c
deleted file mode 100644
index 74b32a9..0000000
--- a/drivers/staging/rdma/amso1100/c2_intr.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_vq.h"
-
-static void handle_mq(struct c2_dev *c2dev, u32 index);
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
-
-/*
- * Handle RNIC interrupts
- */
-void c2_rnic_interrupt(struct c2_dev *c2dev)
-{
-	unsigned int mq_index;
-
-	while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
-		mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
-		if (mq_index & 0x80000000) {
-			break;
-		}
-
-		c2dev->hints_read++;
-		handle_mq(c2dev, mq_index);
-	}
-
-}
-
-/*
- * Top level MQ handler
- */
-static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
-{
-	if (c2dev->qptr_array[mq_index] == NULL) {
-		pr_debug("handle_mq: stray activity for mq_index=%d\n",
-			 mq_index);
-		return;
-	}
-
-	switch (mq_index) {
-	case (0):
-		/*
-		 * An index of 0 in the activity queue
-		 * indicates the req vq now has messages
-		 * available...
-		 *
-		 * Wake up any waiters waiting on req VQ
-		 * message availability.
-		 */
-		wake_up(&c2dev->req_vq_wo);
-		break;
-	case (1):
-		handle_vq(c2dev, mq_index);
-		break;
-	case (2):
-		/* We have to purge the VQ in case there are pending
-		 * accept reply requests that would result in the
-		 * generation of an ESTABLISHED event. If we don't
-		 * generate these first, a CLOSE event could end up
-		 * being delivered before the ESTABLISHED event.
-		 */
-		handle_vq(c2dev, 1);
-
-		c2_ae_event(c2dev, mq_index);
-		break;
-	default:
-		/* There is no event synchronization between CQ events
-		 * and AE or CM events. In fact, CQE could be
-		 * delivered for all of the I/O up to and including the
-		 * FLUSH for a peer disconnect prior to the ESTABLISHED
-		 * event being delivered to the app. The reason for this
-		 * is that CM events are delivered on a thread, while AE
-		 * and CQ events are delivered in interrupt context.
-		 */
-		c2_cq_event(c2dev, mq_index);
-		break;
-	}
-
-	return;
-}
-
-/*
- * Handles verbs WR replies.
- */
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
-{
-	void *adapter_msg, *reply_msg;
-	struct c2wr_hdr *host_msg;
-	struct c2wr_hdr tmp;
-	struct c2_mq *reply_vq;
-	struct c2_vq_req *req;
-	struct iw_cm_event cm_event;
-	int err;
-
-	reply_vq = c2dev->qptr_array[mq_index];
-
-	/*
-	 * get next msg from mq_index into adapter_msg.
-	 * don't free it yet.
-	 */
-	adapter_msg = c2_mq_consume(reply_vq);
-	if (adapter_msg == NULL) {
-		return;
-	}
-
-	host_msg = vq_repbuf_alloc(c2dev);
-
-	/*
-	 * If we can't get a host buffer, then we'll still
-	 * wake up the waiter; we just won't give it the msg.
-	 * It is assumed the waiter will deal with this...
-	 */
-	if (!host_msg) {
-		pr_debug("handle_vq: no repbufs!\n");
-
-		/*
-		 * just copy the WR header into a local variable.
-		 * this allows us to still demux on the context
-		 */
-		host_msg = &tmp;
-		memcpy(host_msg, adapter_msg, sizeof(tmp));
-		reply_msg = NULL;
-	} else {
-		memcpy(host_msg, adapter_msg, reply_vq->msg_size);
-		reply_msg = host_msg;
-	}
-
-	/*
-	 * consume the msg from the MQ
-	 */
-	c2_mq_free(reply_vq);
-
-	/*
-	 * wakeup the waiter.
-	 */
-	req = (struct c2_vq_req *) (unsigned long) host_msg->context;
-	if (req == NULL) {
-		/*
-		 * We should never get here, as the adapter should
-		 * never send us a reply that we're not expecting.
-		 */
-		if (reply_msg != NULL)
-			vq_repbuf_free(c2dev, host_msg);
-		pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
-		return;
-	}
-
-	if (reply_msg)
-		err = c2_errno(reply_msg);
-	else
-		err = -ENOMEM;
-
-	if (!err) switch (req->event) {
-	case IW_CM_EVENT_ESTABLISHED:
-		c2_set_qp_state(req->qp,
-				C2_QP_STATE_RTS);
-		/*
-		 * Until ird/ord negotiation via MPAv2 support is added, send
-		 * max supported values
-		 */
-		cm_event.ird = cm_event.ord = 128;
-	case IW_CM_EVENT_CLOSE:
-
-		/*
-		 * Move the QP to RTS if this is
-		 * the established event
-		 */
-		cm_event.event = req->event;
-		cm_event.status = 0;
-		cm_event.local_addr = req->cm_id->local_addr;
-		cm_event.remote_addr = req->cm_id->remote_addr;
-		cm_event.private_data = NULL;
-		cm_event.private_data_len = 0;
-		req->cm_id->event_handler(req->cm_id, &cm_event);
-		break;
-	default:
-		break;
-	}
-
-	req->reply_msg = (u64) (unsigned long) (reply_msg);
-	atomic_set(&req->reply_ready, 1);
-	wake_up(&req->wait_object);
-
-	/*
-	 * If the request was cancelled, then this put will
-	 * free the vq_req memory...and reply_msg!!!
-	 */
-	vq_req_put(c2dev, req);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_mm.c b/drivers/staging/rdma/amso1100/c2_mm.c
deleted file mode 100644
index 25081e2..0000000
--- a/drivers/staging/rdma/amso1100/c2_mm.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-
-#define PBL_VIRT 1
-#define PBL_PHYS 2
-
-/*
- * Send all the PBL messages to convey the remainder of the PBL
- * Wait for the adapter's reply on the last one.
- * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
- *
- * NOTE:  vq_req is _not_ freed by this function.  The VQ Host
- *	  Reply buffer _is_ freed by this function.
- */
-static int
-send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
-		  unsigned long va, u32 pbl_depth,
-		  struct c2_vq_req *vq_req, int pbl_type)
-{
-	u32 pbe_count;		/* amt that fits in a PBL msg */
-	u32 count;		/* amt in this PBL MSG. */
-	struct c2wr_nsmr_pbl_req *wr;	/* PBL WR ptr */
-	struct c2wr_nsmr_pbl_rep *reply;	/* reply ptr */
- 	int err, pbl_virt, pbl_index, i;
-
-	switch (pbl_type) {
-	case PBL_VIRT:
-		pbl_virt = 1;
-		break;
-	case PBL_PHYS:
-		pbl_virt = 0;
-		break;
-	default:
-		return -EINVAL;
-		break;
-	}
-
-	pbe_count = (c2dev->req_vq.msg_size -
-		     sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
-	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-	if (!wr) {
-		return -ENOMEM;
-	}
-	c2_wr_set_id(wr, CCWR_NSMR_PBL);
-
-	/*
-	 * Only the last PBL message will generate a reply from the verbs,
-	 * so we set the context to 0 indicating there is no kernel verbs
-	 * handler blocked awaiting this reply.
-	 */
-	wr->hdr.context = 0;
-	wr->rnic_handle = c2dev->adapter_handle;
-	wr->stag_index = stag_index;	/* already swapped */
-	wr->flags = 0;
-	pbl_index = 0;
-	while (pbl_depth) {
-		count = min(pbe_count, pbl_depth);
-		wr->addrs_length = cpu_to_be32(count);
-
-		/*
-		 *  If this is the last message, then reference the
-		 *  vq request struct because we're going to wait for a reply.
-		 *  Also mark this PBL msg as the last one.
-		 */
-		if (count == pbl_depth) {
-			/*
-			 * reference the request struct.  dereferenced in the
-			 * int handler.
-			 */
-			vq_req_get(c2dev, vq_req);
-			wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
-
-			/*
-			 * This is the last PBL message.
-			 * Set the context to our VQ Request Object so we can
-			 * wait for the reply.
-			 */
-			wr->hdr.context = (unsigned long) vq_req;
-		}
-
-		/*
-		 * If pbl_virt is set then va is a virtual address
-		 * that describes a virtually contiguous memory
-		 * allocation. The wr needs the start of each virtual page
-		 * to be converted to the corresponding physical address
-		 * of the page. If pbl_virt is not set then va is an array
-		 * of physical addresses and there is no conversion to do.
-		 * Just fill in the wr with what is in the array.
-		 */
-		for (i = 0; i < count; i++) {
-			if (pbl_virt) {
-				va += PAGE_SIZE;
-			} else {
- 				wr->paddrs[i] =
-				    cpu_to_be64(((u64 *)va)[pbl_index + i]);
-			}
-		}
-
-		/*
-		 * Send WR to adapter
-		 */
-		err = vq_send_wr(c2dev, (union c2wr *) wr);
-		if (err) {
-			if (count <= pbe_count) {
-				vq_req_put(c2dev, vq_req);
-			}
-			goto bail0;
-		}
-		pbl_depth -= count;
-		pbl_index += count;
-	}
-
-	/*
-	 *  Now wait for the reply...
-	 */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err) {
-		goto bail0;
-	}
-
-	/*
-	 * Process reply
-	 */
-	reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	err = c2_errno(reply);
-
-	vq_repbuf_free(c2dev, reply);
-bail0:
-	kfree(wr);
-	return err;
-}
-
-#define C2_PBL_MAX_DEPTH 131072
-int
-c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
- 			   int page_size, int pbl_depth, u32 length,
- 			   u32 offset, u64 *va, enum c2_acf acf,
-			   struct c2_mr *mr)
-{
-	struct c2_vq_req *vq_req;
-	struct c2wr_nsmr_register_req *wr;
-	struct c2wr_nsmr_register_rep *reply;
-	u16 flags;
-	int i, pbe_count, count;
-	int err;
-
-	if (!va || !length || !addr_list || !pbl_depth)
-		return -EINTR;
-
-	/*
-	 * Verify PBL depth is within rnic max
-	 */
-	if (pbl_depth > C2_PBL_MAX_DEPTH) {
-		return -EINTR;
-	}
-
-	/*
-	 * allocate verbs request object
-	 */
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-	if (!wr) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	/*
-	 * build the WR
-	 */
-	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
-	wr->hdr.context = (unsigned long) vq_req;
-	wr->rnic_handle = c2dev->adapter_handle;
-
-	flags = (acf | MEM_VA_BASED | MEM_REMOTE);
-
-	/*
-	 * compute how many pbes can fit in the message
-	 */
-	pbe_count = (c2dev->req_vq.msg_size -
-		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
-
-	if (pbl_depth <= pbe_count) {
-		flags |= MEM_PBL_COMPLETE;
-	}
-	wr->flags = cpu_to_be16(flags);
-	wr->stag_key = 0;	//stag_key;
-	wr->va = cpu_to_be64(*va);
-	wr->pd_id = mr->pd->pd_id;
-	wr->pbe_size = cpu_to_be32(page_size);
-	wr->length = cpu_to_be32(length);
-	wr->pbl_depth = cpu_to_be32(pbl_depth);
-	wr->fbo = cpu_to_be32(offset);
-	count = min(pbl_depth, pbe_count);
-	wr->addrs_length = cpu_to_be32(count);
-
-	/*
-	 * fill out the PBL for this message
-	 */
-	for (i = 0; i < count; i++) {
-		wr->paddrs[i] = cpu_to_be64(addr_list[i]);
-	}
-
-	/*
-	 * reference the request struct
-	 */
-	vq_req_get(c2dev, vq_req);
-
-	/*
-	 * send the WR to the adapter
-	 */
-	err = vq_send_wr(c2dev, (union c2wr *) wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail1;
-	}
-
-	/*
-	 * wait for reply from adapter
-	 */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err) {
-		goto bail1;
-	}
-
-	/*
-	 * process reply
-	 */
-	reply =
-	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-	if ((err = c2_errno(reply))) {
-		goto bail2;
-	}
-	//*p_pb_entries = be32_to_cpu(reply->pbl_depth);
-	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
-	vq_repbuf_free(c2dev, reply);
-
-	/*
-	 * if there are still more PBEs we need to send them to
-	 * the adapter and wait for a reply on the final one.
-	 * reuse vq_req for this purpose.
-	 */
-	pbl_depth -= count;
-	if (pbl_depth) {
-
-		vq_req->reply_msg = (unsigned long) NULL;
-		atomic_set(&vq_req->reply_ready, 0);
-		err = send_pbl_messages(c2dev,
-					cpu_to_be32(mr->ibmr.lkey),
-					(unsigned long) &addr_list[i],
-					pbl_depth, vq_req, PBL_PHYS);
-		if (err) {
-			goto bail1;
-		}
-	}
-
-	vq_req_free(c2dev, vq_req);
-	kfree(wr);
-
-	return err;
-
-bail2:
-	vq_repbuf_free(c2dev, reply);
-bail1:
-	kfree(wr);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
-{
-	struct c2_vq_req *vq_req;	/* verbs request object */
-	struct c2wr_stag_dealloc_req wr;	/* work request */
-	struct c2wr_stag_dealloc_rep *reply;	/* WR reply  */
-	int err;
-
-
-	/*
-	 * allocate verbs request object
-	 */
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req) {
-		return -ENOMEM;
-	}
-
-	/*
-	 * Build the WR
-	 */
-	c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
-	wr.hdr.context = (u64) (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.stag_index = cpu_to_be32(stag_index);
-
-	/*
-	 * reference the request struct.  dereferenced in the int handler.
-	 */
-	vq_req_get(c2dev, vq_req);
-
-	/*
-	 * Send WR to adapter
-	 */
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	/*
-	 * Wait for reply from adapter
-	 */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err) {
-		goto bail0;
-	}
-
-	/*
-	 * Process reply
-	 */
-	reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	err = c2_errno(reply);
-
-	vq_repbuf_free(c2dev, reply);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_mq.c b/drivers/staging/rdma/amso1100/c2_mq.c
deleted file mode 100644
index 7827fb8..0000000
--- a/drivers/staging/rdma/amso1100/c2_mq.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include "c2_mq.h"
-
-void *c2_mq_alloc(struct c2_mq *q)
-{
-	BUG_ON(q->magic != C2_MQ_MAGIC);
-	BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-	if (c2_mq_full(q)) {
-		return NULL;
-	} else {
-#ifdef DEBUG
-		struct c2wr_hdr *m =
-		    (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
-		BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
-		m->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-		return m;
-#else
-		return q->msg_pool.host + q->priv * q->msg_size;
-#endif
-	}
-}
-
-void c2_mq_produce(struct c2_mq *q)
-{
-	BUG_ON(q->magic != C2_MQ_MAGIC);
-	BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-	if (!c2_mq_full(q)) {
-		q->priv = (q->priv + 1) % q->q_size;
-		q->hint_count++;
-		/* Update peer's offset. */
-		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
-	}
-}
-
-void *c2_mq_consume(struct c2_mq *q)
-{
-	BUG_ON(q->magic != C2_MQ_MAGIC);
-	BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
-	if (c2_mq_empty(q)) {
-		return NULL;
-	} else {
-#ifdef DEBUG
-		struct c2wr_hdr *m = (struct c2wr_hdr *)
-		    (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
-		BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
-#endif
-		return m;
-#else
-		return q->msg_pool.host + q->priv * q->msg_size;
-#endif
-	}
-}
-
-void c2_mq_free(struct c2_mq *q)
-{
-	BUG_ON(q->magic != C2_MQ_MAGIC);
-	BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
-	if (!c2_mq_empty(q)) {
-
-#ifdef CCMSGMAGIC
-		{
-			struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
-			    (q->msg_pool.adapter + q->priv * q->msg_size);
-			__raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
-		}
-#endif
-		q->priv = (q->priv + 1) % q->q_size;
-		/* Update peer's offset. */
-		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
-	}
-}
-
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
-{
-	BUG_ON(q->magic != C2_MQ_MAGIC);
-	BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-	while (wqe_count--) {
-		BUG_ON(c2_mq_empty(q));
-		*q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
-	}
-}
-
-#if 0
-u32 c2_mq_count(struct c2_mq *q)
-{
-	s32 count;
-
-	if (q->type == C2_MQ_HOST_TARGET)
-		count = be16_to_cpu(*q->shared) - q->priv;
-	else
-		count = q->priv - be16_to_cpu(*q->shared);
-
-	if (count < 0)
-		count += q->q_size;
-
-	return (u32) count;
-}
-#endif  /*  0  */
-
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-		    u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
-{
-	BUG_ON(!q->shared);
-
-	/* This code assumes the byte swapping has already been done! */
-	q->index = index;
-	q->q_size = q_size;
-	q->msg_size = msg_size;
-	q->msg_pool.adapter = pool_start;
-	q->peer = (struct c2_mq_shared __iomem *) peer;
-	q->magic = C2_MQ_MAGIC;
-	q->type = type;
-	q->priv = 0;
-	q->hint_count = 0;
-	return;
-}
-
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-		    u8 *pool_start, u16 __iomem *peer, u32 type)
-{
-	BUG_ON(!q->shared);
-
-	/* This code assumes the byte swapping has already been done! */
-	q->index = index;
-	q->q_size = q_size;
-	q->msg_size = msg_size;
-	q->msg_pool.host = pool_start;
-	q->peer = (struct c2_mq_shared __iomem *) peer;
-	q->magic = C2_MQ_MAGIC;
-	q->type = type;
-	q->priv = 0;
-	q->hint_count = 0;
-	return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
deleted file mode 100644
index 8e1b4d1..0000000
--- a/drivers/staging/rdma/amso1100/c2_mq.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _C2_MQ_H_
-#define _C2_MQ_H_
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include "c2_wr.h"
-
-enum c2_shared_regs {
-
-	C2_SHARED_ARMED = 0x10,
-	C2_SHARED_NOTIFY = 0x18,
-	C2_SHARED_SHARED = 0x40,
-};
-
-struct c2_mq_shared {
-	u16 unused1;
-	u8 armed;
-	u8 notification_type;
-	u32 unused2;
-	u16 shared;
-	/* Pad to 64 bytes. */
-	u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
-};
-
-enum c2_mq_type {
-	C2_MQ_HOST_TARGET = 1,
-	C2_MQ_ADAPTER_TARGET = 2,
-};
-
-/*
- * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
- * c2_user_mq_t (which is the same format) is for user-mode MQs...
- */
-#define C2_MQ_MAGIC 0x4d512020	/* 'MQ  ' */
-struct c2_mq {
-	u32 magic;
-	union {
-		u8 *host;
-		u8 __iomem *adapter;
-	} msg_pool;
-	dma_addr_t host_dma;
-	DEFINE_DMA_UNMAP_ADDR(mapping);
-	u16 hint_count;
-	u16 priv;
-	struct c2_mq_shared __iomem *peer;
-	__be16 *shared;
-	dma_addr_t shared_dma;
-	u32 q_size;
-	u32 msg_size;
-	u32 index;
-	enum c2_mq_type type;
-};
-
-static __inline__ int c2_mq_empty(struct c2_mq *q)
-{
-	return q->priv == be16_to_cpu(*q->shared);
-}
-
-static __inline__ int c2_mq_full(struct c2_mq *q)
-{
-	return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
-}
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
-void *c2_mq_alloc(struct c2_mq *q);
-void c2_mq_produce(struct c2_mq *q);
-void *c2_mq_consume(struct c2_mq *q);
-void c2_mq_free(struct c2_mq *q);
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-		       u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-			   u8 *pool_start, u16 __iomem *peer, u32 type);
-
-#endif				/* _C2_MQ_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_pd.c b/drivers/staging/rdma/amso1100/c2_pd.c
deleted file mode 100644
index f3e81dc..0000000
--- a/drivers/staging/rdma/amso1100/c2_pd.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-
-#include "c2.h"
-#include "c2_provider.h"
-
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
-{
-	u32 obj;
-	int ret = 0;
-
-	spin_lock(&c2dev->pd_table.lock);
-	obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
-				 c2dev->pd_table.last);
-	if (obj >= c2dev->pd_table.max)
-		obj = find_first_zero_bit(c2dev->pd_table.table,
-					  c2dev->pd_table.max);
-	if (obj < c2dev->pd_table.max) {
-		pd->pd_id = obj;
-		__set_bit(obj, c2dev->pd_table.table);
-		c2dev->pd_table.last = obj+1;
-		if (c2dev->pd_table.last >= c2dev->pd_table.max)
-			c2dev->pd_table.last = 0;
-	} else
-		ret = -ENOMEM;
-	spin_unlock(&c2dev->pd_table.lock);
-	return ret;
-}
-
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
-{
-	spin_lock(&c2dev->pd_table.lock);
-	__clear_bit(pd->pd_id, c2dev->pd_table.table);
-	spin_unlock(&c2dev->pd_table.lock);
-}
-
-int c2_init_pd_table(struct c2_dev *c2dev)
-{
-
-	c2dev->pd_table.last = 0;
-	c2dev->pd_table.max = c2dev->props.max_pd;
-	spin_lock_init(&c2dev->pd_table.lock);
-	c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
-					sizeof(long), GFP_KERNEL);
-	if (!c2dev->pd_table.table)
-		return -ENOMEM;
-	bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
-	return 0;
-}
-
-void c2_cleanup_pd_table(struct c2_dev *c2dev)
-{
-	kfree(c2dev->pd_table.table);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
deleted file mode 100644
index de8d10e..0000000
--- a/drivers/staging/rdma/amso1100/c2_provider.c
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/if_arp.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include "c2.h"
-#include "c2_provider.h"
-#include "c2_user.h"
-
-static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-			   struct ib_udata *uhw)
-{
-	struct c2_dev *c2dev = to_c2dev(ibdev);
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	if (uhw->inlen || uhw->outlen)
-		return -EINVAL;
-
-	*props = c2dev->props;
-	return 0;
-}
-
-static int c2_query_port(struct ib_device *ibdev,
-			 u8 port, struct ib_port_attr *props)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	props->max_mtu = IB_MTU_4096;
-	props->lid = 0;
-	props->lmc = 0;
-	props->sm_lid = 0;
-	props->sm_sl = 0;
-	props->state = IB_PORT_ACTIVE;
-	props->phys_state = 0;
-	props->port_cap_flags =
-	    IB_PORT_CM_SUP |
-	    IB_PORT_REINIT_SUP |
-	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
-	props->gid_tbl_len = 1;
-	props->pkey_tbl_len = 1;
-	props->qkey_viol_cntr = 0;
-	props->active_width = 1;
-	props->active_speed = IB_SPEED_SDR;
-
-	return 0;
-}
-
-static int c2_query_pkey(struct ib_device *ibdev,
-			 u8 port, u16 index, u16 * pkey)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	*pkey = 0;
-	return 0;
-}
-
-static int c2_query_gid(struct ib_device *ibdev, u8 port,
-			int index, union ib_gid *gid)
-{
-	struct c2_dev *c2dev = to_c2dev(ibdev);
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
-	memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
-
-	return 0;
-}
-
-/* Allocate the user context data structure. This keeps track
- * of all objects associated with a particular user-mode client.
- */
-static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
-					     struct ib_udata *udata)
-{
-	struct c2_ucontext *context;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	context = kmalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
-	return &context->ibucontext;
-}
-
-static int c2_dealloc_ucontext(struct ib_ucontext *context)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	kfree(context);
-	return 0;
-}
-
-static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return -ENOSYS;
-}
-
-static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
-				 struct ib_ucontext *context,
-				 struct ib_udata *udata)
-{
-	struct c2_pd *pd;
-	int err;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return ERR_PTR(-ENOMEM);
-
-	err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
-	if (err) {
-		kfree(pd);
-		return ERR_PTR(err);
-	}
-
-	if (context) {
-		if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
-			c2_pd_free(to_c2dev(ibdev), pd);
-			kfree(pd);
-			return ERR_PTR(-EFAULT);
-		}
-	}
-
-	return &pd->ibpd;
-}
-
-static int c2_dealloc_pd(struct ib_pd *pd)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
-	kfree(pd);
-
-	return 0;
-}
-
-static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return ERR_PTR(-ENOSYS);
-}
-
-static int c2_ah_destroy(struct ib_ah *ah)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return -ENOSYS;
-}
-
-static void c2_add_ref(struct ib_qp *ibqp)
-{
-	struct c2_qp *qp;
-	BUG_ON(!ibqp);
-	qp = to_c2qp(ibqp);
-	atomic_inc(&qp->refcount);
-}
-
-static void c2_rem_ref(struct ib_qp *ibqp)
-{
-	struct c2_qp *qp;
-	BUG_ON(!ibqp);
-	qp = to_c2qp(ibqp);
-	if (atomic_dec_and_test(&qp->refcount))
-		wake_up(&qp->wait);
-}
-
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
-{
-	struct c2_dev* c2dev = to_c2dev(device);
-	struct c2_qp *qp;
-
-	qp = c2_find_qpn(c2dev, qpn);
-	pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
-		__func__, qp, qpn, device,
-		(qp?atomic_read(&qp->refcount):0));
-
-	return (qp?&qp->ibqp:NULL);
-}
-
-static struct ib_qp *c2_create_qp(struct ib_pd *pd,
-				  struct ib_qp_init_attr *init_attr,
-				  struct ib_udata *udata)
-{
-	struct c2_qp *qp;
-	int err;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	if (init_attr->create_flags)
-		return ERR_PTR(-EINVAL);
-
-	switch (init_attr->qp_type) {
-	case IB_QPT_RC:
-		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-		if (!qp) {
-			pr_debug("%s: Unable to allocate QP\n", __func__);
-			return ERR_PTR(-ENOMEM);
-		}
-		spin_lock_init(&qp->lock);
-		if (pd->uobject) {
-			/* userspace specific */
-		}
-
-		err = c2_alloc_qp(to_c2dev(pd->device),
-				  to_c2pd(pd), init_attr, qp);
-
-		if (err && pd->uobject) {
-			/* userspace specific */
-		}
-
-		break;
-	default:
-		pr_debug("%s: Invalid QP type: %d\n", __func__,
-			init_attr->qp_type);
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (err) {
-		kfree(qp);
-		return ERR_PTR(err);
-	}
-
-	return &qp->ibqp;
-}
-
-static int c2_destroy_qp(struct ib_qp *ib_qp)
-{
-	struct c2_qp *qp = to_c2qp(ib_qp);
-
-	pr_debug("%s:%u qp=%p,qp->state=%d\n",
-		__func__, __LINE__, ib_qp, qp->state);
-	c2_free_qp(to_c2dev(ib_qp->device), qp);
-	kfree(qp);
-	return 0;
-}
-
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev,
-				  const struct ib_cq_init_attr *attr,
-				  struct ib_ucontext *context,
-				  struct ib_udata *udata)
-{
-	int entries = attr->cqe;
-	struct c2_cq *cq;
-	int err;
-
-	if (attr->flags)
-		return ERR_PTR(-EINVAL);
-
-	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
-	if (!cq) {
-		pr_debug("%s: Unable to allocate CQ\n", __func__);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
-	if (err) {
-		pr_debug("%s: error initializing CQ\n", __func__);
-		kfree(cq);
-		return ERR_PTR(err);
-	}
-
-	return &cq->ibcq;
-}
-
-static int c2_destroy_cq(struct ib_cq *ib_cq)
-{
-	struct c2_cq *cq = to_c2cq(ib_cq);
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	c2_free_cq(to_c2dev(ib_cq->device), cq);
-	kfree(cq);
-
-	return 0;
-}
-
-static inline u32 c2_convert_access(int acc)
-{
-	return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
-	    (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
-	    (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
-	    C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
-}
-
-static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
-{
-	struct c2_mr *mr;
-	u64 *page_list;
-	const u32 total_len = 0xffffffff;	/* AMSO1100 limit */
-	int err, page_shift, pbl_depth, i;
-	u64 kva = 0;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	/*
-	 * This is a map of all phy mem...use a 32k page_shift.
-	 */
-	page_shift = PAGE_SHIFT + 3;
-	pbl_depth = ALIGN(total_len, BIT(page_shift)) >> page_shift;
-
-	page_list = vmalloc(sizeof(u64) * pbl_depth);
-	if (!page_list) {
-		pr_debug("couldn't vmalloc page_list of size %zd\n",
-			(sizeof(u64) * pbl_depth));
-		return ERR_PTR(-ENOMEM);
-	}
-
-	for (i = 0; i < pbl_depth; i++)
-		page_list[i] = (i << page_shift);
-
-	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
-	if (!mr) {
-		vfree(page_list);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mr->pd = to_c2pd(pd);
-	mr->umem = NULL;
-	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
-		"*iova_start %llx, first pa %llx, last pa %llx\n",
-		__func__, page_shift, pbl_depth, total_len,
-		(unsigned long long) kva,
-	       	(unsigned long long) page_list[0],
-	       	(unsigned long long) page_list[pbl_depth-1]);
-	err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), page_list,
-					 BIT(page_shift), pbl_depth,
-					 total_len, 0, &kva,
-					 c2_convert_access(acc), mr);
-	vfree(page_list);
-	if (err) {
-		kfree(mr);
-		return ERR_PTR(err);
-	}
-
-	return &mr->ibmr;
-}
-
-static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-				    u64 virt, int acc, struct ib_udata *udata)
-{
-	u64 *pages;
-	u64 kva = 0;
-	int shift, n, len;
-	int i, k, entry;
-	int err = 0;
-	struct scatterlist *sg;
-	struct c2_pd *c2pd = to_c2pd(pd);
-	struct c2_mr *c2mr;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
-	if (!c2mr)
-		return ERR_PTR(-ENOMEM);
-	c2mr->pd = c2pd;
-
-	c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
-	if (IS_ERR(c2mr->umem)) {
-		err = PTR_ERR(c2mr->umem);
-		kfree(c2mr);
-		return ERR_PTR(err);
-	}
-
-	shift = ffs(c2mr->umem->page_size) - 1;
-	n = c2mr->umem->nmap;
-
-	pages = kmalloc_array(n, sizeof(u64), GFP_KERNEL);
-	if (!pages) {
-		err = -ENOMEM;
-		goto err;
-	}
-
-	i = 0;
-	for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> shift;
-		for (k = 0; k < len; ++k) {
-			pages[i++] =
-				sg_dma_address(sg) +
-				(c2mr->umem->page_size * k);
-		}
-	}
-
-	kva = virt;
-  	err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
-					 pages,
-					 c2mr->umem->page_size,
-					 i,
-					 length,
-					 ib_umem_offset(c2mr->umem),
-					 &kva,
-					 c2_convert_access(acc),
-					 c2mr);
-	kfree(pages);
-	if (err)
-		goto err;
-	return &c2mr->ibmr;
-
-err:
-	ib_umem_release(c2mr->umem);
-	kfree(c2mr);
-	return ERR_PTR(err);
-}
-
-static int c2_dereg_mr(struct ib_mr *ib_mr)
-{
-	struct c2_mr *mr = to_c2mr(ib_mr);
-	int err;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
-	if (err)
-		pr_debug("c2_stag_dealloc failed: %d\n", err);
-	else {
-		if (mr->umem)
-			ib_umem_release(mr->umem);
-		kfree(mr);
-	}
-
-	return err;
-}
-
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
-			char *buf)
-{
-	struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return sprintf(buf, "%x\n", c2dev->props.hw_ver);
-}
-
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return sprintf(buf, "%x.%x.%x\n",
-		       (int) (c2dev->props.fw_ver >> 32),
-		       (int) (c2dev->props.fw_ver >> 16) & 0xffff,
-		       (int) (c2dev->props.fw_ver & 0xffff));
-}
-
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
-			char *buf)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return sprintf(buf, "AMSO1100\n");
-}
-
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
-			  char *buf)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *c2_dev_attributes[] = {
-	&dev_attr_hw_rev,
-	&dev_attr_fw_ver,
-	&dev_attr_hca_type,
-	&dev_attr_board_id
-};
-
-static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-			int attr_mask, struct ib_udata *udata)
-{
-	int err;
-
-	err =
-	    c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
-			 attr_mask);
-
-	return err;
-}
-
-static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return -ENOSYS;
-}
-
-static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return -ENOSYS;
-}
-
-static int c2_process_mad(struct ib_device *ibdev,
-			  int mad_flags,
-			  u8 port_num,
-			  const struct ib_wc *in_wc,
-			  const struct ib_grh *in_grh,
-			  const struct ib_mad_hdr *in_mad,
-			  size_t in_mad_size,
-			  struct ib_mad_hdr *out_mad,
-			  size_t *out_mad_size,
-			  u16 *out_mad_pkey_index)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return -ENOSYS;
-}
-
-static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	/* Request a connection */
-	return c2_llp_connect(cm_id, iw_param);
-}
-
-static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	/* Accept the new connection */
-	return c2_llp_accept(cm_id, iw_param);
-}
-
-static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	return c2_llp_reject(cm_id, pdata, pdata_len);
-}
-
-static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
-{
-	int err;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	err = c2_llp_service_create(cm_id, backlog);
-	pr_debug("%s:%u err=%d\n",
-		__func__, __LINE__,
-		err);
-	return err;
-}
-
-static int c2_service_destroy(struct iw_cm_id *cm_id)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-
-	return c2_llp_service_destroy(cm_id);
-}
-
-static int c2_pseudo_up(struct net_device *netdev)
-{
-	struct in_device *ind;
-	struct c2_dev *c2dev = netdev->ml_priv;
-
-	ind = in_dev_get(netdev);
-	if (!ind)
-		return 0;
-
-	pr_debug("adding...\n");
-	for_ifa(ind) {
-#ifdef DEBUG
-		u8 *ip = (u8 *) & ifa->ifa_address;
-
-		pr_debug("%s: %d.%d.%d.%d\n",
-		       ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
-		c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
-	}
-	endfor_ifa(ind);
-	in_dev_put(ind);
-
-	return 0;
-}
-
-static int c2_pseudo_down(struct net_device *netdev)
-{
-	struct in_device *ind;
-	struct c2_dev *c2dev = netdev->ml_priv;
-
-	ind = in_dev_get(netdev);
-	if (!ind)
-		return 0;
-
-	pr_debug("deleting...\n");
-	for_ifa(ind) {
-#ifdef DEBUG
-		u8 *ip = (u8 *) & ifa->ifa_address;
-
-		pr_debug("%s: %d.%d.%d.%d\n",
-		       ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
-		c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
-	}
-	endfor_ifa(ind);
-	in_dev_put(ind);
-
-	return 0;
-}
-
-static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-	kfree_skb(skb);
-	return NETDEV_TX_OK;
-}
-
-static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
-		return -EINVAL;
-
-	netdev->mtu = new_mtu;
-
-	/* TODO: Tell rnic about new rdma interface mtu */
-	return 0;
-}
-
-static const struct net_device_ops c2_pseudo_netdev_ops = {
-	.ndo_open 		= c2_pseudo_up,
-	.ndo_stop 		= c2_pseudo_down,
-	.ndo_start_xmit 	= c2_pseudo_xmit_frame,
-	.ndo_change_mtu 	= c2_pseudo_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-static void setup(struct net_device *netdev)
-{
-	netdev->netdev_ops = &c2_pseudo_netdev_ops;
-
-	netdev->watchdog_timeo = 0;
-	netdev->type = ARPHRD_ETHER;
-	netdev->mtu = 1500;
-	netdev->hard_header_len = ETH_HLEN;
-	netdev->addr_len = ETH_ALEN;
-	netdev->tx_queue_len = 0;
-	netdev->flags |= IFF_NOARP;
-}
-
-static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
-{
-	char name[IFNAMSIZ];
-	struct net_device *netdev;
-
-	/* change ethxxx to iwxxx */
-	strcpy(name, "iw");
-	strcat(name, &c2dev->netdev->name[3]);
-	netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
-	if (!netdev) {
-		printk(KERN_ERR PFX "%s -  etherdev alloc failed",
-			__func__);
-		return NULL;
-	}
-
-	netdev->ml_priv = c2dev;
-
-	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
-	memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
-
-	/* Print out the MAC address */
-	pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);
-
-#if 0
-	/* Disable network packets */
-	netif_stop_queue(netdev);
-#endif
-	return netdev;
-}
-
-static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
-			     struct ib_port_immutable *immutable)
-{
-	struct ib_port_attr attr;
-	int err;
-
-	err = c2_query_port(ibdev, port_num, &attr);
-	if (err)
-		return err;
-
-	immutable->pkey_tbl_len = attr.pkey_tbl_len;
-	immutable->gid_tbl_len = attr.gid_tbl_len;
-	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
-	return 0;
-}
-
-int c2_register_device(struct c2_dev *dev)
-{
-	int ret = -ENOMEM;
-	int i;
-
-	/* Register pseudo network device */
-	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
-	if (!dev->pseudo_netdev)
-		goto out;
-
-	ret = register_netdev(dev->pseudo_netdev);
-	if (ret)
-		goto out_free_netdev;
-
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
-	dev->ibdev.owner = THIS_MODULE;
-	dev->ibdev.uverbs_cmd_mask =
-	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
-	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
-	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
-	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
-	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
-	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
-	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
-	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
-	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
-	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
-	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
-	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
-	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
-	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
-	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
-	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
-
-	dev->ibdev.node_type = RDMA_NODE_RNIC;
-	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
-	memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
-	dev->ibdev.phys_port_cnt = 1;
-	dev->ibdev.num_comp_vectors = 1;
-	dev->ibdev.dma_device = &dev->pcidev->dev;
-	dev->ibdev.query_device = c2_query_device;
-	dev->ibdev.query_port = c2_query_port;
-	dev->ibdev.query_pkey = c2_query_pkey;
-	dev->ibdev.query_gid = c2_query_gid;
-	dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
-	dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
-	dev->ibdev.mmap = c2_mmap_uar;
-	dev->ibdev.alloc_pd = c2_alloc_pd;
-	dev->ibdev.dealloc_pd = c2_dealloc_pd;
-	dev->ibdev.create_ah = c2_ah_create;
-	dev->ibdev.destroy_ah = c2_ah_destroy;
-	dev->ibdev.create_qp = c2_create_qp;
-	dev->ibdev.modify_qp = c2_modify_qp;
-	dev->ibdev.destroy_qp = c2_destroy_qp;
-	dev->ibdev.create_cq = c2_create_cq;
-	dev->ibdev.destroy_cq = c2_destroy_cq;
-	dev->ibdev.poll_cq = c2_poll_cq;
-	dev->ibdev.get_dma_mr = c2_get_dma_mr;
-	dev->ibdev.reg_user_mr = c2_reg_user_mr;
-	dev->ibdev.dereg_mr = c2_dereg_mr;
-	dev->ibdev.get_port_immutable = c2_port_immutable;
-
-	dev->ibdev.alloc_fmr = NULL;
-	dev->ibdev.unmap_fmr = NULL;
-	dev->ibdev.dealloc_fmr = NULL;
-	dev->ibdev.map_phys_fmr = NULL;
-
-	dev->ibdev.attach_mcast = c2_multicast_attach;
-	dev->ibdev.detach_mcast = c2_multicast_detach;
-	dev->ibdev.process_mad = c2_process_mad;
-
-	dev->ibdev.req_notify_cq = c2_arm_cq;
-	dev->ibdev.post_send = c2_post_send;
-	dev->ibdev.post_recv = c2_post_receive;
-
-	dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
-	if (dev->ibdev.iwcm == NULL) {
-		ret = -ENOMEM;
-		goto out_unregister_netdev;
-	}
-	dev->ibdev.iwcm->add_ref = c2_add_ref;
-	dev->ibdev.iwcm->rem_ref = c2_rem_ref;
-	dev->ibdev.iwcm->get_qp = c2_get_qp;
-	dev->ibdev.iwcm->connect = c2_connect;
-	dev->ibdev.iwcm->accept = c2_accept;
-	dev->ibdev.iwcm->reject = c2_reject;
-	dev->ibdev.iwcm->create_listen = c2_service_create;
-	dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
-
-	ret = ib_register_device(&dev->ibdev, NULL);
-	if (ret)
-		goto out_free_iwcm;
-
-	for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
-		ret = device_create_file(&dev->ibdev.dev,
-					       c2_dev_attributes[i]);
-		if (ret)
-			goto out_unregister_ibdev;
-	}
-	goto out;
-
-out_unregister_ibdev:
-	ib_unregister_device(&dev->ibdev);
-out_free_iwcm:
-	kfree(dev->ibdev.iwcm);
-out_unregister_netdev:
-	unregister_netdev(dev->pseudo_netdev);
-out_free_netdev:
-	free_netdev(dev->pseudo_netdev);
-out:
-	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
-	return ret;
-}
-
-void c2_unregister_device(struct c2_dev *dev)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	unregister_netdev(dev->pseudo_netdev);
-	free_netdev(dev->pseudo_netdev);
-	ib_unregister_device(&dev->ibdev);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_provider.h b/drivers/staging/rdma/amso1100/c2_provider.h
deleted file mode 100644
index bf18998..0000000
--- a/drivers/staging/rdma/amso1100/c2_provider.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_PROVIDER_H
-#define C2_PROVIDER_H
-#include <linux/inetdevice.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-
-#include "c2_mq.h"
-#include <rdma/iw_cm.h>
-
-#define C2_MPT_FLAG_ATOMIC        (1 << 14)
-#define C2_MPT_FLAG_REMOTE_WRITE  (1 << 13)
-#define C2_MPT_FLAG_REMOTE_READ   (1 << 12)
-#define C2_MPT_FLAG_LOCAL_WRITE   (1 << 11)
-#define C2_MPT_FLAG_LOCAL_READ    (1 << 10)
-
-struct c2_buf_list {
-	void *buf;
-	DEFINE_DMA_UNMAP_ADDR(mapping);
-};
-
-
-/* The user context keeps track of objects allocated for a
- * particular user-mode client. */
-struct c2_ucontext {
-	struct ib_ucontext ibucontext;
-};
-
-struct c2_mtt;
-
-/* All objects associated with a PD are kept in the
- * associated user context if present.
- */
-struct c2_pd {
-	struct ib_pd ibpd;
-	u32 pd_id;
-};
-
-struct c2_mr {
-	struct ib_mr ibmr;
-	struct c2_pd *pd;
-	struct ib_umem *umem;
-};
-
-struct c2_av;
-
-enum c2_ah_type {
-	C2_AH_ON_HCA,
-	C2_AH_PCI_POOL,
-	C2_AH_KMALLOC
-};
-
-struct c2_ah {
-	struct ib_ah ibah;
-};
-
-struct c2_cq {
-	struct ib_cq ibcq;
-	spinlock_t lock;
-	atomic_t refcount;
-	int cqn;
-	int is_kernel;
-	wait_queue_head_t wait;
-
-	u32 adapter_handle;
-	struct c2_mq mq;
-};
-
-struct c2_wq {
-	spinlock_t lock;
-};
-struct iw_cm_id;
-struct c2_qp {
-	struct ib_qp ibqp;
-	struct iw_cm_id *cm_id;
-	spinlock_t lock;
-	atomic_t refcount;
-	wait_queue_head_t wait;
-	int qpn;
-
-	u32 adapter_handle;
-	u32 send_sgl_depth;
-	u32 recv_sgl_depth;
-	u32 rdma_write_sgl_depth;
-	u8 state;
-
-	struct c2_mq sq_mq;
-	struct c2_mq rq_mq;
-};
-
-struct c2_cr_query_attrs {
-	u32 local_addr;
-	u32 remote_addr;
-	u16 local_port;
-	u16 remote_port;
-};
-
-static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
-{
-	return container_of(ibpd, struct c2_pd, ibpd);
-}
-
-static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
-{
-	return container_of(ibucontext, struct c2_ucontext, ibucontext);
-}
-
-static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
-{
-	return container_of(ibmr, struct c2_mr, ibmr);
-}
-
-
-static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
-{
-	return container_of(ibah, struct c2_ah, ibah);
-}
-
-static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
-{
-	return container_of(ibcq, struct c2_cq, ibcq);
-}
-
-static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
-{
-	return container_of(ibqp, struct c2_qp, ibqp);
-}
-
-static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
-{
-	struct in_device *ind;
-	int ret = 0;
-
-	ind = in_dev_get(netdev);
-	if (!ind)
-		return 0;
-
-	for_ifa(ind) {
-		if (ifa->ifa_address == addr) {
-			ret = 1;
-			break;
-		}
-	}
-	endfor_ifa(ind);
-	in_dev_put(ind);
-	return ret;
-}
-#endif				/* C2_PROVIDER_H */
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
deleted file mode 100644
index ca364db..0000000
--- a/drivers/staging/rdma/amso1100/c2_qp.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_MAX_ORD_PER_QP 128
-#define C2_MAX_IRD_PER_QP 128
-
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-#define NO_SUPPORT -1
-static const u8 c2_opcode[] = {
-	[IB_WR_SEND] = C2_WR_TYPE_SEND,
-	[IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
-	[IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
-	[IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
-};
-
-static int to_c2_state(enum ib_qp_state ib_state)
-{
-	switch (ib_state) {
-	case IB_QPS_RESET:
-		return C2_QP_STATE_IDLE;
-	case IB_QPS_RTS:
-		return C2_QP_STATE_RTS;
-	case IB_QPS_SQD:
-		return C2_QP_STATE_CLOSING;
-	case IB_QPS_SQE:
-		return C2_QP_STATE_CLOSING;
-	case IB_QPS_ERR:
-		return C2_QP_STATE_ERROR;
-	default:
-		return -1;
-	}
-}
-
-static int to_ib_state(enum c2_qp_state c2_state)
-{
-	switch (c2_state) {
-	case C2_QP_STATE_IDLE:
-		return IB_QPS_RESET;
-	case C2_QP_STATE_CONNECTING:
-		return IB_QPS_RTR;
-	case C2_QP_STATE_RTS:
-		return IB_QPS_RTS;
-	case C2_QP_STATE_CLOSING:
-		return IB_QPS_SQD;
-	case C2_QP_STATE_ERROR:
-		return IB_QPS_ERR;
-	case C2_QP_STATE_TERMINATE:
-		return IB_QPS_SQE;
-	default:
-		return -1;
-	}
-}
-
-static const char *to_ib_state_str(int ib_state)
-{
-	static const char *state_str[] = {
-		"IB_QPS_RESET",
-		"IB_QPS_INIT",
-		"IB_QPS_RTR",
-		"IB_QPS_RTS",
-		"IB_QPS_SQD",
-		"IB_QPS_SQE",
-		"IB_QPS_ERR"
-	};
-	if (ib_state < IB_QPS_RESET ||
-	    ib_state > IB_QPS_ERR)
-		return "<invalid IB QP state>";
-
-	ib_state -= IB_QPS_RESET;
-	return state_str[ib_state];
-}
-
-void c2_set_qp_state(struct c2_qp *qp, int c2_state)
-{
-	int new_state = to_ib_state(c2_state);
-
-	pr_debug("%s: qp[%p] state modify %s --> %s\n",
-	       __func__,
-		qp,
-		to_ib_state_str(qp->state),
-		to_ib_state_str(new_state));
-	qp->state = new_state;
-}
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
-		 struct ib_qp_attr *attr, int attr_mask)
-{
-	struct c2wr_qp_modify_req wr;
-	struct c2wr_qp_modify_rep *reply;
-	struct c2_vq_req *vq_req;
-	unsigned long flags;
-	u8 next_state;
-	int err;
-
-	pr_debug("%s:%d qp=%p, %s --> %s\n",
-		__func__, __LINE__,
-		qp,
-		to_ib_state_str(qp->state),
-		to_ib_state_str(attr->qp_state));
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.qp_handle = qp->adapter_handle;
-	wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-	wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
-	if (attr_mask & IB_QP_STATE) {
-		/* Ensure the state is valid */
-		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
-			err = -EINVAL;
-			goto bail0;
-		}
-
-		wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
-
-		if (attr->qp_state == IB_QPS_ERR) {
-			spin_lock_irqsave(&qp->lock, flags);
-			if (qp->cm_id && qp->state == IB_QPS_RTS) {
-				pr_debug("Generating CLOSE event for QP-->ERR, "
-					"qp=%p, cm_id=%p\n",qp,qp->cm_id);
-				/* Generate a CLOSE event */
-				vq_req->cm_id = qp->cm_id;
-				vq_req->event = IW_CM_EVENT_CLOSE;
-			}
-			spin_unlock_irqrestore(&qp->lock, flags);
-		}
-		next_state =  attr->qp_state;
-
-	} else if (attr_mask & IB_QP_CUR_STATE) {
-
-		if (attr->cur_qp_state != IB_QPS_RTR &&
-		    attr->cur_qp_state != IB_QPS_RTS &&
-		    attr->cur_qp_state != IB_QPS_SQD &&
-		    attr->cur_qp_state != IB_QPS_SQE) {
-			err = -EINVAL;
-			goto bail0;
-		} else
-			wr.next_qp_state =
-			    cpu_to_be32(to_c2_state(attr->cur_qp_state));
-
-		next_state = attr->cur_qp_state;
-
-	} else {
-		err = 0;
-		goto bail0;
-	}
-
-	/* reference the request struct */
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail0;
-
-	reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	err = c2_errno(reply);
-	if (!err)
-		qp->state = next_state;
-#ifdef DEBUG
-	else
-		pr_debug("%s: c2_errno=%d\n", __func__, err);
-#endif
-	/*
-	 * If we're going to error and generating the event here, then
-	 * we need to remove the reference because there will be no
-	 * close event generated by the adapter
-	*/
-	spin_lock_irqsave(&qp->lock, flags);
-	if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
-		qp->cm_id->rem_ref(qp->cm_id);
-		qp->cm_id = NULL;
-	}
-	spin_unlock_irqrestore(&qp->lock, flags);
-
-	vq_repbuf_free(c2dev, reply);
-bail0:
-	vq_req_free(c2dev, vq_req);
-
-	pr_debug("%s:%d qp=%p, cur_state=%s\n",
-		__func__, __LINE__,
-		qp,
-		to_ib_state_str(qp->state));
-	return err;
-}
-
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
-			  int ord, int ird)
-{
-	struct c2wr_qp_modify_req wr;
-	struct c2wr_qp_modify_rep *reply;
-	struct c2_vq_req *vq_req;
-	int err;
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.qp_handle = qp->adapter_handle;
-	wr.ord = cpu_to_be32(ord);
-	wr.ird = cpu_to_be32(ird);
-	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-	wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
-	/* reference the request struct */
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail0;
-
-	reply = (struct c2wr_qp_modify_rep *) (unsigned long)
-		vq_req->reply_msg;
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	err = c2_errno(reply);
-	vq_repbuf_free(c2dev, reply);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-	struct c2_vq_req *vq_req;
-	struct c2wr_qp_destroy_req wr;
-	struct c2wr_qp_destroy_rep *reply;
-	unsigned long flags;
-	int err;
-
-	/*
-	 * Allocate a verb request message
-	 */
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req) {
-		return -ENOMEM;
-	}
-
-	/*
-	 * Initialize the WR
-	 */
-	c2_wr_set_id(&wr, CCWR_QP_DESTROY);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.qp_handle = qp->adapter_handle;
-
-	/*
-	 * reference the request struct.  dereferenced in the interrupt handler.
-	 */
-	vq_req_get(c2dev, vq_req);
-
-	spin_lock_irqsave(&qp->lock, flags);
-	if (qp->cm_id && qp->state == IB_QPS_RTS) {
-		pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
-			"qp=%p, cm_id=%p\n",qp,qp->cm_id);
-		/* Generate a CLOSE event */
-		vq_req->qp = qp;
-		vq_req->cm_id = qp->cm_id;
-		vq_req->event = IW_CM_EVENT_CLOSE;
-	}
-	spin_unlock_irqrestore(&qp->lock, flags);
-
-	/*
-	 * Send WR to adapter
-	 */
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	/*
-	 * Wait for reply from adapter
-	 */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err) {
-		goto bail0;
-	}
-
-	/*
-	 * Process reply
-	 */
-	reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	spin_lock_irqsave(&qp->lock, flags);
-	if (qp->cm_id) {
-		qp->cm_id->rem_ref(qp->cm_id);
-		qp->cm_id = NULL;
-	}
-	spin_unlock_irqrestore(&qp->lock, flags);
-
-	vq_repbuf_free(c2dev, reply);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-	int ret;
-
-	idr_preload(GFP_KERNEL);
-	spin_lock_irq(&c2dev->qp_table.lock);
-
-	ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
-	if (ret >= 0)
-		qp->qpn = ret;
-
-	spin_unlock_irq(&c2dev->qp_table.lock);
-	idr_preload_end();
-	return ret < 0 ? ret : 0;
-}
-
-static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
-{
-	spin_lock_irq(&c2dev->qp_table.lock);
-	idr_remove(&c2dev->qp_table.idr, qpn);
-	spin_unlock_irq(&c2dev->qp_table.lock);
-}
-
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
-{
-	unsigned long flags;
-	struct c2_qp *qp;
-
-	spin_lock_irqsave(&c2dev->qp_table.lock, flags);
-	qp = idr_find(&c2dev->qp_table.idr, qpn);
-	spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
-	return qp;
-}
-
-int c2_alloc_qp(struct c2_dev *c2dev,
-		struct c2_pd *pd,
-		struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
-{
-	struct c2wr_qp_create_req wr;
-	struct c2wr_qp_create_rep *reply;
-	struct c2_vq_req *vq_req;
-	struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
-	struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
-	unsigned long peer_pa;
-	u32 q_size, msg_size, mmap_size;
-	void __iomem *mmap;
-	int err;
-
-	err = c2_alloc_qpn(c2dev, qp);
-	if (err)
-		return err;
-	qp->ibqp.qp_num = qp->qpn;
-	qp->ibqp.qp_type = IB_QPT_RC;
-
-	/* Allocate the SQ and RQ shared pointers */
-	qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-					 &qp->sq_mq.shared_dma, GFP_KERNEL);
-	if (!qp->sq_mq.shared) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-					 &qp->rq_mq.shared_dma, GFP_KERNEL);
-	if (!qp->rq_mq.shared) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	/* Allocate the verbs request */
-	vq_req = vq_req_alloc(c2dev);
-	if (vq_req == NULL) {
-		err = -ENOMEM;
-		goto bail2;
-	}
-
-	/* Initialize the work request */
-	memset(&wr, 0, sizeof(wr));
-	c2_wr_set_id(&wr, CCWR_QP_CREATE);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-	wr.sq_cq_handle = send_cq->adapter_handle;
-	wr.rq_cq_handle = recv_cq->adapter_handle;
-	wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
-	wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
-	wr.srq_handle = 0;
-	wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
-			       QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
-	wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
-	wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
-	wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
-	wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
-	wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
-	wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
-	wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
-	wr.pd_id = pd->pd_id;
-	wr.user_context = (unsigned long) qp;
-
-	vq_req_get(c2dev, vq_req);
-
-	/* Send the WR to the adapter */
-	err = vq_send_wr(c2dev, (union c2wr *) & wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail3;
-	}
-
-	/* Wait for the verb reply  */
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err) {
-		goto bail3;
-	}
-
-	/* Process the reply */
-	reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail3;
-	}
-
-	if ((err = c2_wr_get_result(reply)) != 0) {
-		goto bail4;
-	}
-
-	/* Fill in the kernel QP struct */
-	atomic_set(&qp->refcount, 1);
-	qp->adapter_handle = reply->qp_handle;
-	qp->state = IB_QPS_RESET;
-	qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
-	qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
-	qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
-	init_waitqueue_head(&qp->wait);
-
-	/* Initialize the SQ MQ */
-	q_size = be32_to_cpu(reply->sq_depth);
-	msg_size = be32_to_cpu(reply->sq_msg_size);
-	peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
-	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
-	mmap = ioremap_nocache(peer_pa, mmap_size);
-	if (!mmap) {
-		err = -ENOMEM;
-		goto bail5;
-	}
-
-	c2_mq_req_init(&qp->sq_mq,
-		       be32_to_cpu(reply->sq_mq_index),
-		       q_size,
-		       msg_size,
-		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
-		       mmap,				/* peer */
-		       C2_MQ_ADAPTER_TARGET);
-
-	/* Initialize the RQ mq */
-	q_size = be32_to_cpu(reply->rq_depth);
-	msg_size = be32_to_cpu(reply->rq_msg_size);
-	peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
-	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
-	mmap = ioremap_nocache(peer_pa, mmap_size);
-	if (!mmap) {
-		err = -ENOMEM;
-		goto bail6;
-	}
-
-	c2_mq_req_init(&qp->rq_mq,
-		       be32_to_cpu(reply->rq_mq_index),
-		       q_size,
-		       msg_size,
-		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
-		       mmap,				/* peer */
-		       C2_MQ_ADAPTER_TARGET);
-
-	vq_repbuf_free(c2dev, reply);
-	vq_req_free(c2dev, vq_req);
-
-	return 0;
-
-bail6:
-	iounmap(qp->sq_mq.peer);
-bail5:
-	destroy_qp(c2dev, qp);
-bail4:
-	vq_repbuf_free(c2dev, reply);
-bail3:
-	vq_req_free(c2dev, vq_req);
-bail2:
-	c2_free_mqsp(qp->rq_mq.shared);
-bail1:
-	c2_free_mqsp(qp->sq_mq.shared);
-bail0:
-	c2_free_qpn(c2dev, qp->qpn);
-	return err;
-}
-
-static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
-	if (send_cq == recv_cq)
-		spin_lock_irq(&send_cq->lock);
-	else if (send_cq > recv_cq) {
-		spin_lock_irq(&send_cq->lock);
-		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
-	} else {
-		spin_lock_irq(&recv_cq->lock);
-		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
-	}
-}
-
-static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
-	if (send_cq == recv_cq)
-		spin_unlock_irq(&send_cq->lock);
-	else if (send_cq > recv_cq) {
-		spin_unlock(&recv_cq->lock);
-		spin_unlock_irq(&send_cq->lock);
-	} else {
-		spin_unlock(&send_cq->lock);
-		spin_unlock_irq(&recv_cq->lock);
-	}
-}
-
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-	struct c2_cq *send_cq;
-	struct c2_cq *recv_cq;
-
-	send_cq = to_c2cq(qp->ibqp.send_cq);
-	recv_cq = to_c2cq(qp->ibqp.recv_cq);
-
-	/*
-	 * Lock CQs here, so that CQ polling code can do QP lookup
-	 * without taking a lock.
-	 */
-	c2_lock_cqs(send_cq, recv_cq);
-	c2_free_qpn(c2dev, qp->qpn);
-	c2_unlock_cqs(send_cq, recv_cq);
-
-	/*
-	 * Destroy qp in the rnic...
-	 */
-	destroy_qp(c2dev, qp);
-
-	/*
-	 * Mark any unreaped CQEs as null and void.
-	 */
-	c2_cq_clean(c2dev, qp, send_cq->cqn);
-	if (send_cq != recv_cq)
-		c2_cq_clean(c2dev, qp, recv_cq->cqn);
-	/*
-	 * Unmap the MQs and return the shared pointers
-	 * to the message pool.
-	 */
-	iounmap(qp->sq_mq.peer);
-	iounmap(qp->rq_mq.peer);
-	c2_free_mqsp(qp->sq_mq.shared);
-	c2_free_mqsp(qp->rq_mq.shared);
-
-	atomic_dec(&qp->refcount);
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
-}
-
-/*
- * Function: move_sgl
- *
- * Description:
- * Move an SGL from the user's work request struct into a CCIL Work Request
- * message, swapping to WR byte order and ensuring the total length doesn't
- * overflow.
- *
- * IN:
- * dst		- ptr to CCIL Work Request message SGL memory.
- * src		- ptr to the consumers SGL memory.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int
-move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
-	 u8 * actual_count)
-{
-	u32 tot = 0;		/* running total */
-	u8 acount = 0;		/* running total non-0 len sge's */
-
-	while (count > 0) {
-		/*
-		 * If the addition of this SGE causes the
-		 * total SGL length to exceed 2^32-1, then
-		 * fail-n-bail.
-		 *
-		 * If the current total plus the next element length
-		 * wraps, then it will go negative and be less than the
-		 * current total...
-		 */
-		if ((tot + src->length) < tot) {
-			return -EINVAL;
-		}
-		/*
-		 * Bug: 1456 (as well as 1498 & 1643)
-		 * Skip over any sge's supplied with len=0
-		 */
-		if (src->length) {
-			tot += src->length;
-			dst->stag = cpu_to_be32(src->lkey);
-			dst->to = cpu_to_be64(src->addr);
-			dst->length = cpu_to_be32(src->length);
-			dst++;
-			acount++;
-		}
-		src++;
-		count--;
-	}
-
-	if (acount == 0) {
-		/*
-		 * Bug: 1476 (as well as 1498, 1456 and 1643)
-		 * Setup the SGL in the WR to make it easier for the RNIC.
-		 * This way, the FW doesn't have to deal with special cases.
-		 * Setting length=0 should be sufficient.
-		 */
-		dst->stag = 0;
-		dst->to = 0;
-		dst->length = 0;
-	}
-
-	*p_len = tot;
-	*actual_count = acount;
-	return 0;
-}
-
-/*
- * Function: c2_activity (private function)
- *
- * Description:
- * Post an mq index to the host->adapter activity fifo.
- *
- * IN:
- * c2dev	- ptr to c2dev structure
- * mq_index	- mq index to post
- * shared	- value most recently written to shared
- *
- * OUT:
- *
- * Return:
- * none
- */
-static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
-{
-	/*
-	 * First read the register to see if the FIFO is full, and if so,
-	 * spin until it's not.  This isn't perfect -- there is no
-	 * synchronization among the clients of the register, but in
-	 * practice it prevents multiple CPUs from hammering the bus
-	 * with PCI RETRY. Note that when this does happen, the card
-	 * cannot get on the bus and the card and system hang in a
-	 * deadlock -- thus the need for this code. [TOT]
-	 */
-	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
-		udelay(10);
-
-	__raw_writel(C2_HINT_MAKE(mq_index, shared),
-		     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
-}
-
-/*
- * Function: qp_wr_post
- *
- * Description:
- * This in-line function allocates a MQ msg, then moves the host-copy of
- * the completed WR into msg.  Then it posts the message.
- *
- * IN:
- * q		- ptr to user MQ.
- * wr		- ptr to host-copy of the WR.
- * qp		- ptr to user qp
- * size		- Number of bytes to post.  Assumed to be divisible by 4.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
-{
-	union c2wr *msg;
-
-	msg = c2_mq_alloc(q);
-	if (msg == NULL) {
-		return -EINVAL;
-	}
-#ifdef CCMSGMAGIC
-	((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-
-	/*
-	 * Since all header fields in the WR are the same as the
-	 * CQE, set the following so the adapter need not.
-	 */
-	c2_wr_set_result(wr, CCERR_PENDING);
-
-	/*
-	 * Copy the wr down to the adapter
-	 */
-	memcpy((void *) msg, (void *) wr, size);
-
-	c2_mq_produce(q);
-	return 0;
-}
-
-
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
-		 struct ib_send_wr **bad_wr)
-{
-	struct c2_dev *c2dev = to_c2dev(ibqp->device);
-	struct c2_qp *qp = to_c2qp(ibqp);
-	union c2wr wr;
-	unsigned long lock_flags;
-	int err = 0;
-
-	u32 flags;
-	u32 tot_len;
-	u8 actual_sge_count;
-	u32 msg_size;
-
-	if (qp->state > IB_QPS_RTS) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	while (ib_wr) {
-
-		flags = 0;
-		wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
-		if (ib_wr->send_flags & IB_SEND_SIGNALED) {
-			flags |= SQ_SIGNALED;
-		}
-
-		switch (ib_wr->opcode) {
-		case IB_WR_SEND:
-		case IB_WR_SEND_WITH_INV:
-			if (ib_wr->opcode == IB_WR_SEND) {
-				if (ib_wr->send_flags & IB_SEND_SOLICITED)
-					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
-				else
-					c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
-				wr.sqwr.send.remote_stag = 0;
-			} else {
-				if (ib_wr->send_flags & IB_SEND_SOLICITED)
-					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
-				else
-					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
-				wr.sqwr.send.remote_stag =
-					cpu_to_be32(ib_wr->ex.invalidate_rkey);
-			}
-
-			msg_size = sizeof(struct c2wr_send_req) +
-				sizeof(struct c2_data_addr) * ib_wr->num_sge;
-			if (ib_wr->num_sge > qp->send_sgl_depth) {
-				err = -EINVAL;
-				break;
-			}
-			if (ib_wr->send_flags & IB_SEND_FENCE) {
-				flags |= SQ_READ_FENCE;
-			}
-			err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
-				       ib_wr->sg_list,
-				       ib_wr->num_sge,
-				       &tot_len, &actual_sge_count);
-			wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
-			c2_wr_set_sge_count(&wr, actual_sge_count);
-			break;
-		case IB_WR_RDMA_WRITE:
-			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
-			msg_size = sizeof(struct c2wr_rdma_write_req) +
-			    (sizeof(struct c2_data_addr) * ib_wr->num_sge);
-			if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
-				err = -EINVAL;
-				break;
-			}
-			if (ib_wr->send_flags & IB_SEND_FENCE) {
-				flags |= SQ_READ_FENCE;
-			}
-			wr.sqwr.rdma_write.remote_stag =
-			    cpu_to_be32(rdma_wr(ib_wr)->rkey);
-			wr.sqwr.rdma_write.remote_to =
-			    cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
-			err = move_sgl((struct c2_data_addr *)
-				       & (wr.sqwr.rdma_write.data),
-				       ib_wr->sg_list,
-				       ib_wr->num_sge,
-				       &tot_len, &actual_sge_count);
-			wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
-			c2_wr_set_sge_count(&wr, actual_sge_count);
-			break;
-		case IB_WR_RDMA_READ:
-			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
-			msg_size = sizeof(struct c2wr_rdma_read_req);
-
-			/* IWarp only supports 1 sge for RDMA reads */
-			if (ib_wr->num_sge > 1) {
-				err = -EINVAL;
-				break;
-			}
-
-			/*
-			 * Move the local and remote stag/to/len into the WR.
-			 */
-			wr.sqwr.rdma_read.local_stag =
-			    cpu_to_be32(ib_wr->sg_list->lkey);
-			wr.sqwr.rdma_read.local_to =
-			    cpu_to_be64(ib_wr->sg_list->addr);
-			wr.sqwr.rdma_read.remote_stag =
-			    cpu_to_be32(rdma_wr(ib_wr)->rkey);
-			wr.sqwr.rdma_read.remote_to =
-			    cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
-			wr.sqwr.rdma_read.length =
-			    cpu_to_be32(ib_wr->sg_list->length);
-			break;
-		default:
-			/* error */
-			msg_size = 0;
-			err = -EINVAL;
-			break;
-		}
-
-		/*
-		 * If we had an error on the last wr build, then
-		 * break out.  Possible errors include bogus WR
-		 * type, and a bogus SGL length...
-		 */
-		if (err) {
-			break;
-		}
-
-		/*
-		 * Store flags
-		 */
-		c2_wr_set_flags(&wr, flags);
-
-		/*
-		 * Post the puppy!
-		 */
-		spin_lock_irqsave(&qp->lock, lock_flags);
-		err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
-		if (err) {
-			spin_unlock_irqrestore(&qp->lock, lock_flags);
-			break;
-		}
-
-		/*
-		 * Enqueue mq index to activity FIFO.
-		 */
-		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
-		spin_unlock_irqrestore(&qp->lock, lock_flags);
-
-		ib_wr = ib_wr->next;
-	}
-
-out:
-	if (err)
-		*bad_wr = ib_wr;
-	return err;
-}
-
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
-		    struct ib_recv_wr **bad_wr)
-{
-	struct c2_dev *c2dev = to_c2dev(ibqp->device);
-	struct c2_qp *qp = to_c2qp(ibqp);
-	union c2wr wr;
-	unsigned long lock_flags;
-	int err = 0;
-
-	if (qp->state > IB_QPS_RTS) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	/*
-	 * Try and post each work request
-	 */
-	while (ib_wr) {
-		u32 tot_len;
-		u8 actual_sge_count;
-
-		if (ib_wr->num_sge > qp->recv_sgl_depth) {
-			err = -EINVAL;
-			break;
-		}
-
-		/*
-		 * Create local host-copy of the WR
-		 */
-		wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
-		c2_wr_set_id(&wr, CCWR_RECV);
-		c2_wr_set_flags(&wr, 0);
-
-		/* sge_count is limited to eight bits. */
-		BUG_ON(ib_wr->num_sge >= 256);
-		err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
-			       ib_wr->sg_list,
-			       ib_wr->num_sge, &tot_len, &actual_sge_count);
-		c2_wr_set_sge_count(&wr, actual_sge_count);
-
-		/*
-		 * If we had an error on the last wr build, then
-		 * break out.  Possible errors include bogus WR
-		 * type, and a bogus SGL length...
-		 */
-		if (err) {
-			break;
-		}
-
-		spin_lock_irqsave(&qp->lock, lock_flags);
-		err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
-		if (err) {
-			spin_unlock_irqrestore(&qp->lock, lock_flags);
-			break;
-		}
-
-		/*
-		 * Enqueue mq index to activity FIFO
-		 */
-		c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
-		spin_unlock_irqrestore(&qp->lock, lock_flags);
-
-		ib_wr = ib_wr->next;
-	}
-
-out:
-	if (err)
-		*bad_wr = ib_wr;
-	return err;
-}
-
-void c2_init_qp_table(struct c2_dev *c2dev)
-{
-	spin_lock_init(&c2dev->qp_table.lock);
-	idr_init(&c2dev->qp_table.idr);
-}
-
-void c2_cleanup_qp_table(struct c2_dev *c2dev)
-{
-	idr_destroy(&c2dev->qp_table.idr);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
deleted file mode 100644
index 5e65c6d..0000000
--- a/drivers/staging/rdma/amso1100/c2_rnic.c
+++ /dev/null
@@ -1,652 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/inet.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <linux/route.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_vq.h"
-
-/* Device capabilities */
-#define C2_MIN_PAGESIZE  1024
-
-#define C2_MAX_MRS       32768
-#define C2_MAX_QPS       16000
-#define C2_MAX_WQE_SZ    256
-#define C2_MAX_QP_WR     ((128*1024)/C2_MAX_WQE_SZ)
-#define C2_MAX_SGES      4
-#define C2_MAX_SGE_RD    1
-#define C2_MAX_CQS       32768
-#define C2_MAX_CQES      4096
-#define C2_MAX_PDS       16384
-
-/*
- * Send the adapter INIT message to the amso1100
- */
-static int c2_adapter_init(struct c2_dev *c2dev)
-{
-	struct c2wr_init_req wr;
-
-	memset(&wr, 0, sizeof(wr));
-	c2_wr_set_id(&wr, CCWR_INIT);
-	wr.hdr.context = 0;
-	wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
-	wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
-	wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
-	wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
-	wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
-	wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
-
-	/* Post the init message */
-	return vq_send_wr(c2dev, (union c2wr *) & wr);
-}
-
-/*
- * Send the adapter TERM message to the amso1100
- */
-static void c2_adapter_term(struct c2_dev *c2dev)
-{
-	struct c2wr_init_req wr;
-
-	memset(&wr, 0, sizeof(wr));
-	c2_wr_set_id(&wr, CCWR_TERM);
-	wr.hdr.context = 0;
-
-	/* Post the init message */
-	vq_send_wr(c2dev, (union c2wr *) & wr);
-	c2dev->init = 0;
-
-	return;
-}
-
-/*
- * Query the adapter
- */
-static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
-{
-	struct c2_vq_req *vq_req;
-	struct c2wr_rnic_query_req wr;
-	struct c2wr_rnic_query_rep *reply;
-	int err;
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
-	wr.hdr.context = (unsigned long) vq_req;
-	wr.rnic_handle = c2dev->adapter_handle;
-
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, (union c2wr *) &wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail1;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail1;
-
-	reply =
-	    (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply)
-		err = -ENOMEM;
-	else
-		err = c2_errno(reply);
-	if (err)
-		goto bail2;
-
-	props->fw_ver =
-		((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
-		((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
-		(be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
-	memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
-	props->max_mr_size         = 0xFFFFFFFF;
-	props->page_size_cap       = ~(C2_MIN_PAGESIZE-1);
-	props->vendor_id           = be32_to_cpu(reply->vendor_id);
-	props->vendor_part_id      = be32_to_cpu(reply->part_number);
-	props->hw_ver              = be32_to_cpu(reply->hw_version);
-	props->max_qp              = be32_to_cpu(reply->max_qps);
-	props->max_qp_wr           = be32_to_cpu(reply->max_qp_depth);
-	props->device_cap_flags    = c2dev->device_cap_flags;
-	props->max_sge             = C2_MAX_SGES;
-	props->max_sge_rd          = C2_MAX_SGE_RD;
-	props->max_cq              = be32_to_cpu(reply->max_cqs);
-	props->max_cqe             = be32_to_cpu(reply->max_cq_depth);
-	props->max_mr              = be32_to_cpu(reply->max_mrs);
-	props->max_pd              = be32_to_cpu(reply->max_pds);
-	props->max_qp_rd_atom      = be32_to_cpu(reply->max_qp_ird);
-	props->max_ee_rd_atom      = 0;
-	props->max_res_rd_atom     = be32_to_cpu(reply->max_global_ird);
-	props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
-	props->max_ee_init_rd_atom = 0;
-	props->atomic_cap          = IB_ATOMIC_NONE;
-	props->max_ee              = 0;
-	props->max_rdd             = 0;
-	props->max_mw              = be32_to_cpu(reply->max_mws);
-	props->max_raw_ipv6_qp     = 0;
-	props->max_raw_ethy_qp     = 0;
-	props->max_mcast_grp       = 0;
-	props->max_mcast_qp_attach = 0;
-	props->max_total_mcast_qp_attach = 0;
-	props->max_ah              = 0;
-	props->max_fmr             = 0;
-	props->max_map_per_fmr     = 0;
-	props->max_srq             = 0;
-	props->max_srq_wr          = 0;
-	props->max_srq_sge         = 0;
-	props->max_pkeys           = 0;
-	props->local_ca_ack_delay  = 0;
-
- bail2:
-	vq_repbuf_free(c2dev, reply);
-
- bail1:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-/*
- * Add an IP address to the RNIC interface
- */
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
-	struct c2_vq_req *vq_req;
-	struct c2wr_rnic_setconfig_req *wr;
-	struct c2wr_rnic_setconfig_rep *reply;
-	struct c2_netaddr netaddr;
-	int err, len;
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	len = sizeof(struct c2_netaddr);
-	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-	if (!wr) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
-	wr->hdr.context = (unsigned long) vq_req;
-	wr->rnic_handle = c2dev->adapter_handle;
-	wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
-
-	netaddr.ip_addr = inaddr;
-	netaddr.netmask = inmask;
-	netaddr.mtu = 0;
-
-	memcpy(wr->data, &netaddr, len);
-
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, (union c2wr *) wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail1;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail1;
-
-	reply =
-	    (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	err = c2_errno(reply);
-	vq_repbuf_free(c2dev, reply);
-
-bail1:
-	kfree(wr);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-/*
- * Delete an IP address from the RNIC interface
- */
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
-	struct c2_vq_req *vq_req;
-	struct c2wr_rnic_setconfig_req *wr;
-	struct c2wr_rnic_setconfig_rep *reply;
-	struct c2_netaddr netaddr;
-	int err, len;
-
-	vq_req = vq_req_alloc(c2dev);
-	if (!vq_req)
-		return -ENOMEM;
-
-	len = sizeof(struct c2_netaddr);
-	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-	if (!wr) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
-	wr->hdr.context = (unsigned long) vq_req;
-	wr->rnic_handle = c2dev->adapter_handle;
-	wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
-
-	netaddr.ip_addr = inaddr;
-	netaddr.netmask = inmask;
-	netaddr.mtu = 0;
-
-	memcpy(wr->data, &netaddr, len);
-
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, (union c2wr *) wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail1;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err)
-		goto bail1;
-
-	reply =
-	    (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	err = c2_errno(reply);
-	vq_repbuf_free(c2dev, reply);
-
-bail1:
-	kfree(wr);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-/*
- * Open a single RNIC instance to use with all
- * low level openib calls
- */
-static int c2_rnic_open(struct c2_dev *c2dev)
-{
-	struct c2_vq_req *vq_req;
-	union c2wr wr;
-	struct c2wr_rnic_open_rep *reply;
-	int err;
-
-	vq_req = vq_req_alloc(c2dev);
-	if (vq_req == NULL) {
-		return -ENOMEM;
-	}
-
-	memset(&wr, 0, sizeof(wr));
-	c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
-	wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
-	wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
-	wr.rnic_open.req.port_num = cpu_to_be16(0);
-	wr.rnic_open.req.user_context = (unsigned long) c2dev;
-
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, &wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err) {
-		goto bail0;
-	}
-
-	reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	if ((err = c2_errno(reply)) != 0) {
-		goto bail1;
-	}
-
-	c2dev->adapter_handle = reply->rnic_handle;
-
-bail1:
-	vq_repbuf_free(c2dev, reply);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-/*
- * Close the RNIC instance
- */
-static int c2_rnic_close(struct c2_dev *c2dev)
-{
-	struct c2_vq_req *vq_req;
-	union c2wr wr;
-	struct c2wr_rnic_close_rep *reply;
-	int err;
-
-	vq_req = vq_req_alloc(c2dev);
-	if (vq_req == NULL) {
-		return -ENOMEM;
-	}
-
-	memset(&wr, 0, sizeof(wr));
-	c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
-	wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
-	wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
-
-	vq_req_get(c2dev, vq_req);
-
-	err = vq_send_wr(c2dev, &wr);
-	if (err) {
-		vq_req_put(c2dev, vq_req);
-		goto bail0;
-	}
-
-	err = vq_wait_for_reply(c2dev, vq_req);
-	if (err) {
-		goto bail0;
-	}
-
-	reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
-	if (!reply) {
-		err = -ENOMEM;
-		goto bail0;
-	}
-
-	if ((err = c2_errno(reply)) != 0) {
-		goto bail1;
-	}
-
-	c2dev->adapter_handle = 0;
-
-bail1:
-	vq_repbuf_free(c2dev, reply);
-bail0:
-	vq_req_free(c2dev, vq_req);
-	return err;
-}
-
-/*
- * Called by c2_probe to initialize the RNIC. This principally
- * involves initializing the various limits and resource pools that
- * comprise the RNIC instance.
- */
-int c2_rnic_init(struct c2_dev *c2dev)
-{
-	int err;
-	u32 qsize, msgsize;
-	void *q1_pages;
-	void *q2_pages;
-	void __iomem *mmio_regs;
-
-	/* Device capabilities */
-	c2dev->device_cap_flags =
-	    (IB_DEVICE_RESIZE_MAX_WR |
-	     IB_DEVICE_CURR_QP_STATE_MOD |
-	     IB_DEVICE_SYS_IMAGE_GUID |
-	     IB_DEVICE_LOCAL_DMA_LKEY |
-	     IB_DEVICE_MEM_WINDOW);
-
-	/* Allocate the qptr_array */
-	c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
-	if (!c2dev->qptr_array) {
-		return -ENOMEM;
-	}
-
-	/* Initialize the qptr_array */
-	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
-	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
-	c2dev->qptr_array[2] = (void *) &c2dev->aeq;
-
-	/* Initialize data structures */
-	init_waitqueue_head(&c2dev->req_vq_wo);
-	spin_lock_init(&c2dev->vqlock);
-	spin_lock_init(&c2dev->lock);
-
-	/* Allocate MQ shared pointer pool for kernel clients. User
-	 * mode client pools are hung off the user context
-	 */
-	err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
-	if (err) {
-		goto bail0;
-	}
-
-	/* Allocate shared pointers for Q0, Q1, and Q2 from
-	 * the shared pointer pool.
-	 */
-
-	c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-					     &c2dev->hint_count_dma,
-					     GFP_KERNEL);
-	c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-					     &c2dev->req_vq.shared_dma,
-					     GFP_KERNEL);
-	c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-					     &c2dev->rep_vq.shared_dma,
-					     GFP_KERNEL);
-	c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-					  &c2dev->aeq.shared_dma, GFP_KERNEL);
-	if (!c2dev->hint_count || !c2dev->req_vq.shared ||
-	    !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-
-	mmio_regs = c2dev->kva;
-	/* Initialize the Verbs Request Queue */
-	c2_mq_req_init(&c2dev->req_vq, 0,
-		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
-		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
-		       mmio_regs +
-		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
-		       mmio_regs +
-		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
-		       C2_MQ_ADAPTER_TARGET);
-
-	/* Initialize the Verbs Reply Queue */
-	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
-	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
-				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
-	if (!q1_pages) {
-		err = -ENOMEM;
-		goto bail1;
-	}
-	dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
-	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
-		 (unsigned long long) c2dev->rep_vq.host_dma);
-	c2_mq_rep_init(&c2dev->rep_vq,
-		   1,
-		   qsize,
-		   msgsize,
-		   q1_pages,
-		   mmio_regs +
-		   be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
-		   C2_MQ_HOST_TARGET);
-
-	/* Initialize the Asynchronous Event Queue */
-	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
-	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
-				      &c2dev->aeq.host_dma, GFP_KERNEL);
-	if (!q2_pages) {
-		err = -ENOMEM;
-		goto bail2;
-	}
-	dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
-	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
-		 (unsigned long long) c2dev->aeq.host_dma);
-	c2_mq_rep_init(&c2dev->aeq,
-		       2,
-		       qsize,
-		       msgsize,
-		       q2_pages,
-		       mmio_regs +
-		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
-		       C2_MQ_HOST_TARGET);
-
-	/* Initialize the verbs request allocator */
-	err = vq_init(c2dev);
-	if (err)
-		goto bail3;
-
-	/* Enable interrupts on the adapter */
-	writel(0, c2dev->regs + C2_IDIS);
-
-	/* create the WR init message */
-	err = c2_adapter_init(c2dev);
-	if (err)
-		goto bail4;
-	c2dev->init++;
-
-	/* open an adapter instance */
-	err = c2_rnic_open(c2dev);
-	if (err)
-		goto bail4;
-
-	/* Initialize the cached adapter limits */
-	err = c2_rnic_query(c2dev, &c2dev->props);
-	if (err)
-		goto bail5;
-
-	/* Initialize the PD pool */
-	err = c2_init_pd_table(c2dev);
-	if (err)
-		goto bail5;
-
-	/* Initialize the QP pool */
-	c2_init_qp_table(c2dev);
-	return 0;
-
-bail5:
-	c2_rnic_close(c2dev);
-bail4:
-	vq_term(c2dev);
-bail3:
-	dma_free_coherent(&c2dev->pcidev->dev,
-			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			  q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
-bail2:
-	dma_free_coherent(&c2dev->pcidev->dev,
-			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			  q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
-bail1:
-	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-bail0:
-	vfree(c2dev->qptr_array);
-
-	return err;
-}
-
-/*
- * Called by c2_remove to cleanup the RNIC resources.
- */
-void c2_rnic_term(struct c2_dev *c2dev)
-{
-
-	/* Close the open adapter instance */
-	c2_rnic_close(c2dev);
-
-	/* Send the TERM message to the adapter */
-	c2_adapter_term(c2dev);
-
-	/* Disable interrupts on the adapter */
-	writel(1, c2dev->regs + C2_IDIS);
-
-	/* Free the QP pool */
-	c2_cleanup_qp_table(c2dev);
-
-	/* Free the PD pool */
-	c2_cleanup_pd_table(c2dev);
-
-	/* Free the verbs request allocator */
-	vq_term(c2dev);
-
-	/* Free the asynchronous event queue */
-	dma_free_coherent(&c2dev->pcidev->dev,
-			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			  c2dev->aeq.msg_pool.host,
-			  dma_unmap_addr(&c2dev->aeq, mapping));
-
-	/* Free the verbs reply queue */
-	dma_free_coherent(&c2dev->pcidev->dev,
-			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			  c2dev->rep_vq.msg_pool.host,
-			  dma_unmap_addr(&c2dev->rep_vq, mapping));
-
-	/* Free the MQ shared pointer pool */
-	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-
-	/* Free the qptr_array */
-	vfree(c2dev->qptr_array);
-
-	return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_status.h b/drivers/staging/rdma/amso1100/c2_status.h
deleted file mode 100644
index 6ee4aa9..0000000
--- a/drivers/staging/rdma/amso1100/c2_status.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef	_C2_STATUS_H_
-#define _C2_STATUS_H_
-
-/*
- * Verbs Status Codes
- */
-enum c2_status {
-	C2_OK = 0,		/* This must be zero */
-	CCERR_INSUFFICIENT_RESOURCES = 1,
-	CCERR_INVALID_MODIFIER = 2,
-	CCERR_INVALID_MODE = 3,
-	CCERR_IN_USE = 4,
-	CCERR_INVALID_RNIC = 5,
-	CCERR_INTERRUPTED_OPERATION = 6,
-	CCERR_INVALID_EH = 7,
-	CCERR_INVALID_CQ = 8,
-	CCERR_CQ_EMPTY = 9,
-	CCERR_NOT_IMPLEMENTED = 10,
-	CCERR_CQ_DEPTH_TOO_SMALL = 11,
-	CCERR_PD_IN_USE = 12,
-	CCERR_INVALID_PD = 13,
-	CCERR_INVALID_SRQ = 14,
-	CCERR_INVALID_ADDRESS = 15,
-	CCERR_INVALID_NETMASK = 16,
-	CCERR_INVALID_QP = 17,
-	CCERR_INVALID_QP_STATE = 18,
-	CCERR_TOO_MANY_WRS_POSTED = 19,
-	CCERR_INVALID_WR_TYPE = 20,
-	CCERR_INVALID_SGL_LENGTH = 21,
-	CCERR_INVALID_SQ_DEPTH = 22,
-	CCERR_INVALID_RQ_DEPTH = 23,
-	CCERR_INVALID_ORD = 24,
-	CCERR_INVALID_IRD = 25,
-	CCERR_QP_ATTR_CANNOT_CHANGE = 26,
-	CCERR_INVALID_STAG = 27,
-	CCERR_QP_IN_USE = 28,
-	CCERR_OUTSTANDING_WRS = 29,
-	CCERR_STAG_IN_USE = 30,
-	CCERR_INVALID_STAG_INDEX = 31,
-	CCERR_INVALID_SGL_FORMAT = 32,
-	CCERR_ADAPTER_TIMEOUT = 33,
-	CCERR_INVALID_CQ_DEPTH = 34,
-	CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
-	CCERR_INVALID_EP = 36,
-	CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
-	CCERR_FLUSHED = 38,
-	CCERR_INVALID_WQE = 39,
-	CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
-	CCERR_REMOTE_TERMINATION_ERROR = 41,
-	CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
-	CCERR_ACCESS_VIOLATION = 43,
-	CCERR_INVALID_PD_ID = 44,
-	CCERR_WRAP_ERROR = 45,
-	CCERR_INV_STAG_ACCESS_ERROR = 46,
-	CCERR_ZERO_RDMA_READ_RESOURCES = 47,
-	CCERR_QP_NOT_PRIVILEGED = 48,
-	CCERR_STAG_STATE_NOT_INVALID = 49,
-	CCERR_INVALID_PAGE_SIZE = 50,
-	CCERR_INVALID_BUFFER_SIZE = 51,
-	CCERR_INVALID_PBE = 52,
-	CCERR_INVALID_FBO = 53,
-	CCERR_INVALID_LENGTH = 54,
-	CCERR_INVALID_ACCESS_RIGHTS = 55,
-	CCERR_PBL_TOO_BIG = 56,
-	CCERR_INVALID_VA = 57,
-	CCERR_INVALID_REGION = 58,
-	CCERR_INVALID_WINDOW = 59,
-	CCERR_TOTAL_LENGTH_TOO_BIG = 60,
-	CCERR_INVALID_QP_ID = 61,
-	CCERR_ADDR_IN_USE = 62,
-	CCERR_ADDR_NOT_AVAIL = 63,
-	CCERR_NET_DOWN = 64,
-	CCERR_NET_UNREACHABLE = 65,
-	CCERR_CONN_ABORTED = 66,
-	CCERR_CONN_RESET = 67,
-	CCERR_NO_BUFS = 68,
-	CCERR_CONN_TIMEDOUT = 69,
-	CCERR_CONN_REFUSED = 70,
-	CCERR_HOST_UNREACHABLE = 71,
-	CCERR_INVALID_SEND_SGL_DEPTH = 72,
-	CCERR_INVALID_RECV_SGL_DEPTH = 73,
-	CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
-	CCERR_INSUFFICIENT_PRIVILEGES = 75,
-	CCERR_STACK_ERROR = 76,
-	CCERR_INVALID_VERSION = 77,
-	CCERR_INVALID_MTU = 78,
-	CCERR_INVALID_IMAGE = 79,
-	CCERR_PENDING = 98,	/* not an error; used internally by adapter */
-	CCERR_DEFER = 99,	/* not an error; used internally by adapter */
-	CCERR_FAILED_WRITE = 100,
-	CCERR_FAILED_ERASE = 101,
-	CCERR_FAILED_VERIFICATION = 102,
-	CCERR_NOT_FOUND = 103,
-
-};
-
-/*
- * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
- */
-enum c2_connect_status {
-	C2_CONN_STATUS_SUCCESS = C2_OK,
-	C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
-	C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
-	C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
-	C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
-	C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
-	C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
-	C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
-	C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
-	C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
-	C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
-};
-
-/*
- * Flash programming status codes.
- */
-enum c2_flash_status {
-	C2_FLASH_STATUS_SUCCESS = 0x0000,
-	C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
-	C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
-	C2_FLASH_STATUS_ECLBS = 0x0400,
-	C2_FLASH_STATUS_PSLBS = 0x0800,
-	C2_FLASH_STATUS_VPENS = 0x1000,
-};
-
-#endif				/* _C2_STATUS_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_user.h b/drivers/staging/rdma/amso1100/c2_user.h
deleted file mode 100644
index 7e9e7ad..0000000
--- a/drivers/staging/rdma/amso1100/c2_user.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_USER_H
-#define C2_USER_H
-
-#include <linux/types.h>
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct c2_alloc_ucontext_resp {
-	__u32 qp_tab_size;
-	__u32 uarc_size;
-};
-
-struct c2_alloc_pd_resp {
-	__u32 pdn;
-	__u32 reserved;
-};
-
-struct c2_create_cq {
-	__u32 lkey;
-	__u32 pdn;
-	__u64 arm_db_page;
-	__u64 set_db_page;
-	__u32 arm_db_index;
-	__u32 set_db_index;
-};
-
-struct c2_create_cq_resp {
-	__u32 cqn;
-	__u32 reserved;
-};
-
-struct c2_create_qp {
-	__u32 lkey;
-	__u32 reserved;
-	__u64 sq_db_page;
-	__u64 rq_db_page;
-	__u32 sq_db_index;
-	__u32 rq_db_index;
-};
-
-#endif				/* C2_USER_H */
diff --git a/drivers/staging/rdma/amso1100/c2_vq.c b/drivers/staging/rdma/amso1100/c2_vq.c
deleted file mode 100644
index 2ec716f..0000000
--- a/drivers/staging/rdma/amso1100/c2_vq.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include "c2_vq.h"
-#include "c2_provider.h"
-
-/*
- * Verbs Request Objects:
- *
- * VQ Request Objects are allocated by the kernel verbs handlers.
- * They contain a wait object, a refcnt, an atomic bool indicating that the
- * adapter has replied, and a copy of the verb reply work request.
- * A pointer to the VQ Request Object is passed down in the context
- * field of the work request message, and reflected back by the adapter
- * in the verbs reply message.  The function handle_vq() in the interrupt
- * path will use this pointer to:
- * 	1) append a copy of the verbs reply message
- * 	2) mark that the reply is ready
- * 	3) wake up the kernel verbs handler blocked awaiting the reply.
- *
- *
- * The kernel verbs handlers do a "get" to put a 2nd reference on the
- * VQ Request object.  If the kernel verbs handler exits before the adapter
- * can respond, this extra reference will keep the VQ Request object around
- * until the adapter's reply can be processed.  The reason we need this is
- * because a pointer to this object is stuffed into the context field of
- * the verbs work request message, and reflected back in the reply message.
- * It is used in the interrupt handler (handle_vq()) to wake up the appropriate
- * kernel verb handler that is blocked awaiting the verb reply.
- * So handle_vq() will do a "put" on the object when it's done accessing it.
- * NOTE:  If we guarantee that the kernel verb handler will never bail before
- *        getting the reply, then we don't need these refcnts.
- *
- *
- * VQ Request objects are freed by the kernel verbs handlers only
- * after the verb has been processed, or when the adapter fails and
- * does not reply.
- *
- *
- * Verbs Reply Buffers:
- *
- * VQ Reply bufs are local host memory copies of an outstanding Verb
- * Request reply message.  They are always allocated by the kernel verbs
- * handlers, and _may_ be
- * freed by either the kernel verbs handler -or- the interrupt handler.  The
- * kernel verbs handler _must_ free the repbuf, then free the vq request object
- * in that order.
- */
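
/*
 * Editor's sketch (not part of the original file): the interrupt-path half
 * of the protocol described above.  The real handle_vq() lives elsewhere in
 * the driver and is not shown in this diff; this is only a schematic of the
 * three steps the comment enumerates, using the c2_vq_req fields declared in
 * c2_vq.h.  Names prefixed "example_" are hypothetical.
 */
static void example_handle_vq_reply(struct c2_dev *c2dev, union c2wr *msg_in)
{
	struct c2_vq_req *req;
	void *reply;

	/* The context stuffed into the WR header comes back unchanged. */
	req = (struct c2_vq_req *) (unsigned long) msg_in->hdr.context;

	/* 1) append a copy of the verbs reply message */
	reply = vq_repbuf_alloc(c2dev);
	if (reply)
		memcpy(reply, msg_in, c2dev->rep_vq.msg_size);
	req->reply_msg = (u64) (unsigned long) reply;

	/* 2) mark that the reply is ready, 3) wake the blocked verbs handler */
	atomic_set(&req->reply_ready, 1);
	wake_up(&req->wait_object);

	/*
	 * Drop the interrupt path's reference.  If the verbs handler already
	 * bailed, this put frees both the request object and the repbuf.
	 */
	vq_req_put(c2dev, req);
}
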
-
-int vq_init(struct c2_dev *c2dev)
-{
-	sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
-		(char) ('0' + c2dev->devnum));
-	c2dev->host_msg_cache =
-	    kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
-			      SLAB_HWCACHE_ALIGN, NULL);
-	if (c2dev->host_msg_cache == NULL) {
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void vq_term(struct c2_dev *c2dev)
-{
-	kmem_cache_destroy(c2dev->host_msg_cache);
-}
-
-/* vq_req_alloc - allocate a VQ Request Object and initialize it.
- * The refcnt is set to 1.
- */
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
-{
-	struct c2_vq_req *r;
-
-	r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
-	if (r) {
-		init_waitqueue_head(&r->wait_object);
-		r->reply_msg = 0;
-		r->event = 0;
-		r->cm_id = NULL;
-		r->qp = NULL;
-		atomic_set(&r->refcnt, 1);
-		atomic_set(&r->reply_ready, 0);
-	}
-	return r;
-}
-
-
-/* vq_req_free - free the VQ Request Object.  It is assumed the verbs handler
- * has already freed the VQ Reply Buffer if it existed.
- */
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-	r->reply_msg = 0;
-	if (atomic_dec_and_test(&r->refcnt)) {
-		kfree(r);
-	}
-}
-
-/* vq_req_get - reference a VQ Request Object.  Done
- * only in the kernel verbs handlers.
- */
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-	atomic_inc(&r->refcnt);
-}
-
-
-/* vq_req_put - dereference and potentially free a VQ Request Object.
- *
- * This is only called by handle_vq() on the
- * interrupt when it is done processing
- * a verb reply message.  If the associated
- * kernel verbs handler has already bailed,
- * then this put will actually free the VQ
- * Request object _and_ the VQ Reply Buffer
- * if it exists.
- */
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-	if (atomic_dec_and_test(&r->refcnt)) {
-		if (r->reply_msg != 0)
-			vq_repbuf_free(c2dev,
-				       (void *) (unsigned long) r->reply_msg);
-		kfree(r);
-	}
-}
-
-
-/*
- * vq_repbuf_alloc - allocate a VQ Reply Buffer.
- */
-void *vq_repbuf_alloc(struct c2_dev *c2dev)
-{
-	return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
-}
-
-/*
- * vq_send_wr - post a verbs request message to the Verbs Request Queue.
- * If a message is not available in the MQ, then block until one is available.
- * NOTE: handle_mq() on the interrupt context will wake up threads blocked here.
- * When the adapter drains the Verbs Request Queue,
- * it inserts MQ index 0 into the
- * adapter->host activity fifo and interrupts the host.
- */
-int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
-{
-	void *msg;
-	wait_queue_t __wait;
-
-	/*
-	 * grab adapter vq lock
-	 */
-	spin_lock(&c2dev->vqlock);
-
-	/*
-	 * allocate msg
-	 */
-	msg = c2_mq_alloc(&c2dev->req_vq);
-
-	/*
-	 * If we cannot get a msg, then we'll wait
-	 * When a message becomes available, the interrupt handler will wake_up()
-	 * any waiters.
-	 */
-	while (msg == NULL) {
-		pr_debug("%s:%d no available msg in VQ, waiting...\n",
-		       __func__, __LINE__);
-		init_waitqueue_entry(&__wait, current);
-		add_wait_queue(&c2dev->req_vq_wo, &__wait);
-		spin_unlock(&c2dev->vqlock);
-		for (;;) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (!c2_mq_full(&c2dev->req_vq)) {
-				break;
-			}
-			if (!signal_pending(current)) {
-				schedule_timeout(1 * HZ);	/* 1 second... */
-				continue;
-			}
-			set_current_state(TASK_RUNNING);
-			remove_wait_queue(&c2dev->req_vq_wo, &__wait);
-			return -EINTR;
-		}
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&c2dev->req_vq_wo, &__wait);
-		spin_lock(&c2dev->vqlock);
-		msg = c2_mq_alloc(&c2dev->req_vq);
-	}
-
-	/*
-	 * copy wr into adapter msg
-	 */
-	memcpy(msg, wr, c2dev->req_vq.msg_size);
-
-	/*
-	 * post msg
-	 */
-	c2_mq_produce(&c2dev->req_vq);
-
-	/*
-	 * release adapter vq lock
-	 */
-	spin_unlock(&c2dev->vqlock);
-	return 0;
-}
-
-
-/*
- * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
- */
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
-{
-	if (!wait_event_timeout(req->wait_object,
-				atomic_read(&req->reply_ready),
-				60*HZ))
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-/*
- * vq_repbuf_free - Free a Verbs Reply Buffer.
- */
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
-{
-	kmem_cache_free(c2dev->host_msg_cache, reply);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
deleted file mode 100644
index c1f6cef..0000000
--- a/drivers/staging/rdma/amso1100/c2_vq.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_VQ_H_
-#define _C2_VQ_H_
-#include <linux/sched.h>
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_provider.h"
-
-struct c2_vq_req {
-	u64 reply_msg;		/* ptr to reply msg */
-	wait_queue_head_t wait_object;	/* wait object for vq reqs */
-	atomic_t reply_ready;	/* set when reply is ready */
-	atomic_t refcnt;	/* used to cancel WRs... */
-	int event;
-	struct iw_cm_id *cm_id;
-	struct c2_qp *qp;
-};
-
-int vq_init(struct c2_dev *c2dev);
-void vq_term(struct c2_dev *c2dev);
-
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
-int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
-
-void *vq_repbuf_alloc(struct c2_dev *c2dev);
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
-
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
-#endif				/* _C2_VQ_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_wr.h b/drivers/staging/rdma/amso1100/c2_wr.h
deleted file mode 100644
index 8d4b4ca..0000000
--- a/drivers/staging/rdma/amso1100/c2_wr.h
+++ /dev/null
@@ -1,1520 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_WR_H_
-#define _C2_WR_H_
-
-#ifdef CCDEBUG
-#define CCWR_MAGIC		0xb07700b0
-#endif
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-/* Maximum allowed size in bytes of private_data exchange
- * on connect.
- */
-#define C2_MAX_PRIVATE_DATA_SIZE 200
-
-/*
- * These types are shared among the adapter, host, and CCIL consumer.
- */
-enum c2_cq_notification_type {
-	C2_CQ_NOTIFICATION_TYPE_NONE = 1,
-	C2_CQ_NOTIFICATION_TYPE_NEXT,
-	C2_CQ_NOTIFICATION_TYPE_NEXT_SE
-};
-
-enum c2_setconfig_cmd {
-	C2_CFG_ADD_ADDR = 1,
-	C2_CFG_DEL_ADDR = 2,
-	C2_CFG_ADD_ROUTE = 3,
-	C2_CFG_DEL_ROUTE = 4
-};
-
-enum c2_getconfig_cmd {
-	C2_GETCONFIG_ROUTES = 1,
-	C2_GETCONFIG_ADDRS
-};
-
-/*
- *  CCIL Work Request Identifiers
- */
-enum c2wr_ids {
-	CCWR_RNIC_OPEN = 1,
-	CCWR_RNIC_QUERY,
-	CCWR_RNIC_SETCONFIG,
-	CCWR_RNIC_GETCONFIG,
-	CCWR_RNIC_CLOSE,
-	CCWR_CQ_CREATE,
-	CCWR_CQ_QUERY,
-	CCWR_CQ_MODIFY,
-	CCWR_CQ_DESTROY,
-	CCWR_QP_CONNECT,
-	CCWR_PD_ALLOC,
-	CCWR_PD_DEALLOC,
-	CCWR_SRQ_CREATE,
-	CCWR_SRQ_QUERY,
-	CCWR_SRQ_MODIFY,
-	CCWR_SRQ_DESTROY,
-	CCWR_QP_CREATE,
-	CCWR_QP_QUERY,
-	CCWR_QP_MODIFY,
-	CCWR_QP_DESTROY,
-	CCWR_NSMR_STAG_ALLOC,
-	CCWR_NSMR_REGISTER,
-	CCWR_NSMR_PBL,
-	CCWR_STAG_DEALLOC,
-	CCWR_NSMR_REREGISTER,
-	CCWR_SMR_REGISTER,
-	CCWR_MR_QUERY,
-	CCWR_MW_ALLOC,
-	CCWR_MW_QUERY,
-	CCWR_EP_CREATE,
-	CCWR_EP_GETOPT,
-	CCWR_EP_SETOPT,
-	CCWR_EP_DESTROY,
-	CCWR_EP_BIND,
-	CCWR_EP_CONNECT,
-	CCWR_EP_LISTEN,
-	CCWR_EP_SHUTDOWN,
-	CCWR_EP_LISTEN_CREATE,
-	CCWR_EP_LISTEN_DESTROY,
-	CCWR_EP_QUERY,
-	CCWR_CR_ACCEPT,
-	CCWR_CR_REJECT,
-	CCWR_CONSOLE,
-	CCWR_TERM,
-	CCWR_FLASH_INIT,
-	CCWR_FLASH,
-	CCWR_BUF_ALLOC,
-	CCWR_BUF_FREE,
-	CCWR_FLASH_WRITE,
-	CCWR_INIT,		/* WARNING: Don't move this ever again! */
-
-
-
-	/* Add new IDs here */
-
-
-
-	/*
-	 * WARNING: CCWR_LAST must always be the last verbs id defined!
-	 *          All the preceding IDs are fixed, and must not change.
-	 *          You can add new IDs, but must not remove or reorder
-	 *          any IDs. If you do, YOU will ruin any hope of
-	 *          compatibility between versions.
-	 */
-	CCWR_LAST,
-
-	/*
-	 * Start over at 1 so that arrays indexed by user wr id's
-	 * begin at 1.  This is OK since the verbs and user wr id's
-	 * are always used on disjoint sets of queues.
-	 */
-	/*
-	 * The order of the CCWR_SEND_XX verbs must
-	 * match the order of the RDMA_OPs
-	 */
-	CCWR_SEND = 1,
-	CCWR_SEND_INV,
-	CCWR_SEND_SE,
-	CCWR_SEND_SE_INV,
-	CCWR_RDMA_WRITE,
-	CCWR_RDMA_READ,
-	CCWR_RDMA_READ_INV,
-	CCWR_MW_BIND,
-	CCWR_NSMR_FASTREG,
-	CCWR_STAG_INVALIDATE,
-	CCWR_RECV,
-	CCWR_NOP,
-	CCWR_UNIMPL,
-/* WARNING: This must always be the last user wr id defined! */
-};
-#define RDMA_SEND_OPCODE_FROM_WR_ID(x)   (x+2)
-
-/*
- * SQ/RQ Work Request Types
- */
-enum c2_wr_type {
-	C2_WR_TYPE_SEND = CCWR_SEND,
-	C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
-	C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
-	C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
-	C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
-	C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
-	C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
-	C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
-	C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
-	C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
-	C2_WR_TYPE_RECV = CCWR_RECV,
-	C2_WR_TYPE_NOP = CCWR_NOP,
-};
-
-struct c2_netaddr {
-	__be32 ip_addr;
-	__be32 netmask;
-	u32 mtu;
-};
-
-struct c2_route {
-	u32 ip_addr;		/* 0 indicates the default route */
-	u32 netmask;		/* netmask associated with dst */
-	u32 flags;
-	union {
-		u32 ipaddr;	/* address of the nexthop interface */
-		u8 enaddr[6];
-	} nexthop;
-};
-
-/*
- * A Scatter Gather Entry.
- */
-struct c2_data_addr {
-	__be32 stag;
-	__be32 length;
-	__be64 to;
-};
-
-/*
- * MR and MW flags used by the consumer, RI, and RNIC.
- */
-enum c2_mm_flags {
-	MEM_REMOTE = 0x0001,	/* allow mw binds with remote access. */
-	MEM_VA_BASED = 0x0002,	/* Not Zero-based */
-	MEM_PBL_COMPLETE = 0x0004,	/* PBL array is complete in this msg */
-	MEM_LOCAL_READ = 0x0008,	/* allow local reads */
-	MEM_LOCAL_WRITE = 0x0010,	/* allow local writes */
-	MEM_REMOTE_READ = 0x0020,	/* allow remote reads */
-	MEM_REMOTE_WRITE = 0x0040,	/* allow remote writes */
-	MEM_WINDOW_BIND = 0x0080,	/* binds allowed */
-	MEM_SHARED = 0x0100,	/* set if MR is shared */
-	MEM_STAG_VALID = 0x0200	/* set if STAG is in valid state */
-};
-
-/*
- * CCIL API ACF flags defined in terms of the low level mem flags.
- * This minimizes translation needed in the user API
- */
-enum c2_acf {
-	C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
-	C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
-	C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
-	C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
-	C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
-};
-
-/*
- * Image types of objects written to flash
- */
-#define C2_FLASH_IMG_BITFILE 1
-#define C2_FLASH_IMG_OPTION_ROM 2
-#define C2_FLASH_IMG_VPD 3
-
-/*
- *  To fix bug 1815 we define the maximum allowable size of the
- *  terminate message (per the IETF spec; refer to the IETF protocol
- *  specification, section 12.1.6, page 64).
- *  The message is prefixed by 20 bytes of DDP info.
- *
- *  Then the message has 6 bytes for the terminate control
- *  and DDP segment length info plus a DDP header (either
- *  14 or 18 bytes) plus 28 bytes for the RDMA header.
- *  Thus the max size is:
- *  20 + (6 + 18 + 28) = 72
- */
-#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
-
-/*
- * Build String Length.  It must be the same as C2_BUILD_STR_LEN in ccil_api.h
- */
-#define WR_BUILD_STR_LEN 64
-
-/*
- * WARNING:  All of these structs need to align any 64-bit types on
- * 64-bit boundaries!  64-bit types include u64 and __be64.
- */
-
-/*
- * Clustercore Work Request Header.  Be sensitive to field layout
- * and alignment.
- */
-struct c2wr_hdr {
-	/* wqe_count is part of the cqe.  It is put here so the
-	 * adapter can write to it while the wr is pending without
-	 * clobbering part of the wr.  This word need not be dma'd
-	 * from the host to adapter by libccil, but we copy it anyway
-	 * to make the memcpy to the adapter better aligned.
-	 */
-	__be32 wqe_count;
-
-	/* Put these fields next so that later 32- and 64-bit
-	 * quantities are naturally aligned.
-	 */
-	u8 id;
-	u8 result;		/* adapter -> host */
-	u8 sge_count;		/* host -> adapter */
-	u8 flags;		/* host -> adapter */
-
-	u64 context;
-#ifdef CCMSGMAGIC
-	u32 magic;
-	u32 pad;
-#endif
-} __attribute__((packed));
-
-/*
- *------------------------ RNIC ------------------------
- */
-
-/*
- * WR_RNIC_OPEN
- */
-
-/*
- * Flags for the RNIC WRs
- */
-enum c2_rnic_flags {
-	RNIC_IRD_STATIC = 0x0001,
-	RNIC_ORD_STATIC = 0x0002,
-	RNIC_QP_STATIC = 0x0004,
-	RNIC_SRQ_SUPPORTED = 0x0008,
-	RNIC_PBL_BLOCK_MODE = 0x0010,
-	RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
-	RNIC_CQ_OVF_DETECTED = 0x0040,
-	RNIC_PRIV_MODE = 0x0080
-};
-
-struct c2wr_rnic_open_req {
-	struct c2wr_hdr hdr;
-	u64 user_context;
-	__be16 flags;		/* See enum c2_rnic_flags */
-	__be16 port_num;
-} __attribute__((packed));
-
-struct c2wr_rnic_open_rep {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-} __attribute__((packed));
-
-union c2wr_rnic_open {
-	struct c2wr_rnic_open_req req;
-	struct c2wr_rnic_open_rep rep;
-} __attribute__((packed));
-
-struct c2wr_rnic_query_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_QUERY
- */
-struct c2wr_rnic_query_rep {
-	struct c2wr_hdr hdr;
-	u64 user_context;
-	__be32 vendor_id;
-	__be32 part_number;
-	__be32 hw_version;
-	__be32 fw_ver_major;
-	__be32 fw_ver_minor;
-	__be32 fw_ver_patch;
-	char fw_ver_build_str[WR_BUILD_STR_LEN];
-	__be32 max_qps;
-	__be32 max_qp_depth;
-	u32 max_srq_depth;
-	u32 max_send_sgl_depth;
-	u32 max_rdma_sgl_depth;
-	__be32 max_cqs;
-	__be32 max_cq_depth;
-	u32 max_cq_event_handlers;
-	__be32 max_mrs;
-	u32 max_pbl_depth;
-	__be32 max_pds;
-	__be32 max_global_ird;
-	u32 max_global_ord;
-	__be32 max_qp_ird;
-	__be32 max_qp_ord;
-	u32 flags;
-	__be32 max_mws;
-	u32 pbe_range_low;
-	u32 pbe_range_high;
-	u32 max_srqs;
-	u32 page_size;
-} __attribute__((packed));
-
-union c2wr_rnic_query {
-	struct c2wr_rnic_query_req req;
-	struct c2wr_rnic_query_rep rep;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_GETCONFIG
- */
-
-struct c2wr_rnic_getconfig_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 option;		/* see c2_getconfig_cmd_t */
-	u64 reply_buf;
-	u32 reply_buf_len;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_getconfig_rep {
-	struct c2wr_hdr hdr;
-	u32 option;		/* see c2_getconfig_cmd_t */
-	u32 count_len;		/* length of the number of addresses configured */
-} __attribute__((packed)) ;
-
-union c2wr_rnic_getconfig {
-	struct c2wr_rnic_getconfig_req req;
-	struct c2wr_rnic_getconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_SETCONFIG
- */
-struct c2wr_rnic_setconfig_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	__be32 option;		/* See c2_setconfig_cmd_t */
-	/* variable data and pad. See c2_netaddr and c2_route */
-	u8 data[0];
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_setconfig_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_setconfig {
-	struct c2wr_rnic_setconfig_req req;
-	struct c2wr_rnic_setconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_CLOSE
- */
-struct c2wr_rnic_close_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_close_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_close {
-	struct c2wr_rnic_close_req req;
-	struct c2wr_rnic_close_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ CQ ------------------------
- */
-struct c2wr_cq_create_req {
-	struct c2wr_hdr hdr;
-	__be64 shared_ht;
-	u64 user_context;
-	__be64 msg_pool;
-	u32 rnic_handle;
-	__be32 msg_size;
-	__be32 depth;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_create_rep {
-	struct c2wr_hdr hdr;
-	__be32 mq_index;
-	__be32 adapter_shared;
-	u32 cq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_cq_create {
-	struct c2wr_cq_create_req req;
-	struct c2wr_cq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 cq_handle;
-	u32 new_depth;
-	u64 new_msg_pool;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_modify {
-	struct c2wr_cq_modify_req req;
-	struct c2wr_cq_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 cq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_destroy {
-	struct c2wr_cq_destroy_req req;
-	struct c2wr_cq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ PD ------------------------
- */
-struct c2wr_pd_alloc_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_alloc_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_alloc {
-	struct c2wr_pd_alloc_req req;
-	struct c2wr_pd_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_dealloc {
-	struct c2wr_pd_dealloc_req req;
-	struct c2wr_pd_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ SRQ ------------------------
- */
-struct c2wr_srq_create_req {
-	struct c2wr_hdr hdr;
-	u64 shared_ht;
-	u64 user_context;
-	u32 rnic_handle;
-	u32 srq_depth;
-	u32 srq_limit;
-	u32 sgl_depth;
-	u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_create_rep {
-	struct c2wr_hdr hdr;
-	u32 srq_depth;
-	u32 sgl_depth;
-	u32 msg_size;
-	u32 mq_index;
-	u32 mq_start;
-	u32 srq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_srq_create {
-	struct c2wr_srq_create_req req;
-	struct c2wr_srq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 srq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_srq_destroy {
-	struct c2wr_srq_destroy_req req;
-	struct c2wr_srq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ QP ------------------------
- */
-enum c2wr_qp_flags {
-	QP_RDMA_READ = 0x00000001,	/* RDMA read enabled? */
-	QP_RDMA_WRITE = 0x00000002,	/* RDMA write enabled? */
-	QP_MW_BIND = 0x00000004,	/* MWs enabled */
-	QP_ZERO_STAG = 0x00000008,	/* enabled? */
-	QP_REMOTE_TERMINATION = 0x00000010,	/* remote end terminated */
-	QP_RDMA_READ_RESPONSE = 0x00000020	/* Remote RDMA read  */
-	    /* enabled? */
-};
-
-struct c2wr_qp_create_req {
-	struct c2wr_hdr hdr;
-	__be64 shared_sq_ht;
-	__be64 shared_rq_ht;
-	u64 user_context;
-	u32 rnic_handle;
-	u32 sq_cq_handle;
-	u32 rq_cq_handle;
-	__be32 sq_depth;
-	__be32 rq_depth;
-	u32 srq_handle;
-	u32 srq_limit;
-	__be32 flags;		/* see enum c2wr_qp_flags */
-	__be32 send_sgl_depth;
-	__be32 recv_sgl_depth;
-	__be32 rdma_write_sgl_depth;
-	__be32 ord;
-	__be32 ird;
-	u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_create_rep {
-	struct c2wr_hdr hdr;
-	__be32 sq_depth;
-	__be32 rq_depth;
-	u32 send_sgl_depth;
-	u32 recv_sgl_depth;
-	u32 rdma_write_sgl_depth;
-	u32 ord;
-	u32 ird;
-	__be32 sq_msg_size;
-	__be32 sq_mq_index;
-	__be32 sq_mq_start;
-	__be32 rq_msg_size;
-	__be32 rq_mq_index;
-	__be32 rq_mq_start;
-	u32 qp_handle;
-} __attribute__((packed)) ;
-
-union c2wr_qp_create {
-	struct c2wr_qp_create_req req;
-	struct c2wr_qp_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_rep {
-	struct c2wr_hdr hdr;
-	u64 user_context;
-	u32 rnic_handle;
-	u32 sq_depth;
-	u32 rq_depth;
-	u32 send_sgl_depth;
-	u32 rdma_write_sgl_depth;
-	u32 recv_sgl_depth;
-	u32 ord;
-	u32 ird;
-	u16 qp_state;
-	u16 flags;		/* see c2wr_qp_flags_t */
-	u32 qp_id;
-	u32 local_addr;
-	u32 remote_addr;
-	u16 local_port;
-	u16 remote_port;
-	u32 terminate_msg_length;	/* 0 if not present */
-	u8 data[0];
-	/* Terminate Message in-line here. */
-} __attribute__((packed)) ;
-
-union c2wr_qp_query {
-	struct c2wr_qp_query_req req;
-	struct c2wr_qp_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_req {
-	struct c2wr_hdr hdr;
-	u64 stream_msg;
-	u32 stream_msg_length;
-	u32 rnic_handle;
-	u32 qp_handle;
-	__be32 next_qp_state;
-	__be32 ord;
-	__be32 ird;
-	__be32 sq_depth;
-	__be32 rq_depth;
-	u32 llp_ep_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_rep {
-	struct c2wr_hdr hdr;
-	u32 ord;
-	u32 ird;
-	u32 sq_depth;
-	u32 rq_depth;
-	u32 sq_msg_size;
-	u32 sq_mq_index;
-	u32 sq_mq_start;
-	u32 rq_msg_size;
-	u32 rq_mq_index;
-	u32 rq_mq_start;
-} __attribute__((packed)) ;
-
-union c2wr_qp_modify {
-	struct c2wr_qp_modify_req req;
-	struct c2wr_qp_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_qp_destroy {
-	struct c2wr_qp_destroy_req req;
-	struct c2wr_qp_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * The CCWR_QP_CONNECT msg is posted on the verbs request queue.  It can
- * only be posted when a QP is in IDLE state.  After the connect request is
- * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
- * No synchronous reply from adapter to this WR.  The results of
- * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS
- * See c2wr_ae_active_connect_results_t
- */
-struct c2wr_qp_connect_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 qp_handle;
-	__be32 remote_addr;
-	__be16 remote_port;
-	u16 pad;
-	__be32 private_data_length;
-	u8 private_data[0];	/* Private data in-line. */
-} __attribute__((packed)) ;
-
-struct c2wr_qp_connect {
-	struct c2wr_qp_connect_req req;
-	/* no synchronous reply.         */
-} __attribute__((packed)) ;
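
/*
 * Editor's sketch (not part of the original file): filling in and posting an
 * active connect request as described above.  The real caller lived in the
 * driver's connection-management code and is not shown in this diff; this
 * assumes the driver's c2.h/c2_vq.h declarations are in scope, builds the
 * message in a request-queue-slot-sized buffer (as the CCWR_RNIC_SETCONFIG
 * helper earlier in this diff does), and abbreviates error handling.
 * "qp_handle" is the handle returned in the CCWR_QP_CREATE reply; names
 * prefixed "example_" are hypothetical.
 */
static int example_post_connect(struct c2_dev *c2dev, u32 qp_handle,
				__be32 raddr, __be16 rport,
				const void *pdata, u32 pdata_len)
{
	struct c2wr_qp_connect_req *wr;
	int err;

	if (pdata_len > C2_MAX_PRIVATE_DATA_SIZE)
		return -EINVAL;

	/* Private data rides in-line after the fixed fields. */
	wr = kzalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	c2_wr_set_id(wr, CCWR_QP_CONNECT);
	wr->rnic_handle = c2dev->adapter_handle;
	wr->qp_handle = qp_handle;
	wr->remote_addr = raddr;		/* already network byte order */
	wr->remote_port = rport;
	wr->private_data_length = cpu_to_be32(pdata_len);
	memcpy(wr->private_data, pdata, pdata_len);

	/* No synchronous reply: the outcome arrives later on the AEQ as a
	 * CCAE_ACTIVE_CONNECT_RESULTS event. */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	kfree(wr);
	return err;
}
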
-
-
-/*
- *------------------------ MM ------------------------
- */
-
-struct c2wr_nsmr_stag_alloc_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 pbl_depth;
-	u32 pd_id;
-	u32 flags;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_stag_alloc_rep {
-	struct c2wr_hdr hdr;
-	u32 pbl_depth;
-	u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_stag_alloc {
-	struct c2wr_nsmr_stag_alloc_req req;
-	struct c2wr_nsmr_stag_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_req {
-	struct c2wr_hdr hdr;
-	__be64 va;
-	u32 rnic_handle;
-	__be16 flags;
-	u8 stag_key;
-	u8 pad;
-	u32 pd_id;
-	__be32 pbl_depth;
-	__be32 pbe_size;
-	__be32 fbo;
-	__be32 length;
-	__be32 addrs_length;
-	/* array of paddrs (must be aligned on a 64bit boundary) */
-	__be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_rep {
-	struct c2wr_hdr hdr;
-	u32 pbl_depth;
-	__be32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_register {
-	struct c2wr_nsmr_register_req req;
-	struct c2wr_nsmr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	__be32 flags;
-	__be32 stag_index;
-	__be32 addrs_length;
-	/* array of paddrs (must be aligned on a 64bit boundary) */
-	__be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_pbl {
-	struct c2wr_nsmr_pbl_req req;
-	struct c2wr_nsmr_pbl_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_rep {
-	struct c2wr_hdr hdr;
-	u8 stag_key;
-	u8 pad[3];
-	u32 pd_id;
-	u32 flags;
-	u32 pbl_depth;
-} __attribute__((packed)) ;
-
-union c2wr_mr_query {
-	struct c2wr_mr_query_req req;
-	struct c2wr_mr_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_rep {
-	struct c2wr_hdr hdr;
-	u8 stag_key;
-	u8 pad[3];
-	u32 pd_id;
-	u32 flags;
-} __attribute__((packed)) ;
-
-union c2wr_mw_query {
-	struct c2wr_mw_query_req req;
-	struct c2wr_mw_query_rep rep;
-} __attribute__((packed)) ;
-
-
-struct c2wr_stag_dealloc_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	__be32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_stag_dealloc_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_stag_dealloc {
-	struct c2wr_stag_dealloc_req req;
-	struct c2wr_stag_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_req {
-	struct c2wr_hdr hdr;
-	u64 va;
-	u32 rnic_handle;
-	u16 flags;
-	u8 stag_key;
-	u8 pad;
-	u32 stag_index;
-	u32 pd_id;
-	u32 pbl_depth;
-	u32 pbe_size;
-	u32 fbo;
-	u32 length;
-	u32 addrs_length;
-	u32 pad1;
-	/* array of paddrs (must be aligned on a 64bit boundary) */
-	u64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_rep {
-	struct c2wr_hdr hdr;
-	u32 pbl_depth;
-	u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_reregister {
-	struct c2wr_nsmr_reregister_req req;
-	struct c2wr_nsmr_reregister_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_req {
-	struct c2wr_hdr hdr;
-	u64 va;
-	u32 rnic_handle;
-	u16 flags;
-	u8 stag_key;
-	u8 pad;
-	u32 stag_index;
-	u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_rep {
-	struct c2wr_hdr hdr;
-	u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_smr_register {
-	struct c2wr_smr_register_req req;
-	struct c2wr_smr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_rep {
-	struct c2wr_hdr hdr;
-	u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_mw_alloc {
-	struct c2wr_mw_alloc_req req;
-	struct c2wr_mw_alloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ WRs -----------------------
- */
-
-struct c2wr_user_hdr {
-	struct c2wr_hdr hdr;		/* Has status and WR Type */
-} __attribute__((packed)) ;
-
-enum c2_qp_state {
-	C2_QP_STATE_IDLE = 0x01,
-	C2_QP_STATE_CONNECTING = 0x02,
-	C2_QP_STATE_RTS = 0x04,
-	C2_QP_STATE_CLOSING = 0x08,
-	C2_QP_STATE_TERMINATE = 0x10,
-	C2_QP_STATE_ERROR = 0x20,
-};
-
-/* Completion queue entry. */
-struct c2wr_ce {
-	struct c2wr_hdr hdr;		/* Has status and WR Type */
-	u64 qp_user_context;	/* c2_user_qp_t * */
-	u32 qp_state;		/* Current QP State */
-	u32 handle;		/* QPID or EP Handle */
-	__be32 bytes_rcvd;		/* valid for RECV WCs */
-	u32 stag;
-} __attribute__((packed)) ;
-
-
-/*
- * Flags used for all post-sq WRs.  These must fit in the flags
- * field of the struct c2wr_hdr (eight bits).
- */
-enum {
-	SQ_SIGNALED = 0x01,
-	SQ_READ_FENCE = 0x02,
-	SQ_FENCE = 0x04,
-};
-
-/*
- * Common fields for all post-sq WRs.  Namely the standard header and a
- * secondary header with fields common to all post-sq WRs.
- */
-struct c2_sq_hdr {
-	struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * Same as above but for post-rq WRs.
- */
-struct c2_rq_hdr {
-	struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * use the same struct for all sends.
- */
-struct c2wr_send_req {
-	struct c2_sq_hdr sq_hdr;
-	__be32 sge_len;
-	__be32 remote_stag;
-	u8 data[0];		/* SGE array */
-} __attribute__((packed));
-
-union c2wr_send {
-	struct c2wr_send_req req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_write_req {
-	struct c2_sq_hdr sq_hdr;
-	__be64 remote_to;
-	__be32 remote_stag;
-	__be32 sge_len;
-	u8 data[0];		/* SGE array */
-} __attribute__((packed));
-
-union c2wr_rdma_write {
-	struct c2wr_rdma_write_req req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_read_req {
-	struct c2_sq_hdr sq_hdr;
-	__be64 local_to;
-	__be64 remote_to;
-	__be32 local_stag;
-	__be32 remote_stag;
-	__be32 length;
-} __attribute__((packed));
-
-union c2wr_rdma_read {
-	struct c2wr_rdma_read_req req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_mw_bind_req {
-	struct c2_sq_hdr sq_hdr;
-	u64 va;
-	u8 stag_key;
-	u8 pad[3];
-	u32 mw_stag_index;
-	u32 mr_stag_index;
-	u32 length;
-	u32 flags;
-} __attribute__((packed));
-
-union c2wr_mw_bind {
-	struct c2wr_mw_bind_req req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_nsmr_fastreg_req {
-	struct c2_sq_hdr sq_hdr;
-	u64 va;
-	u8 stag_key;
-	u8 pad[3];
-	u32 stag_index;
-	u32 pbe_size;
-	u32 fbo;
-	u32 length;
-	u32 addrs_length;
-	/* array of paddrs (must be aligned on a 64bit boundary) */
-	u64 paddrs[0];
-} __attribute__((packed));
-
-union c2wr_nsmr_fastreg {
-	struct c2wr_nsmr_fastreg_req req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_stag_invalidate_req {
-	struct c2_sq_hdr sq_hdr;
-	u8 stag_key;
-	u8 pad[3];
-	u32 stag_index;
-} __attribute__((packed));
-
-union c2wr_stag_invalidate {
-	struct c2wr_stag_invalidate_req req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-union c2wr_sqwr {
-	struct c2_sq_hdr sq_hdr;
-	struct c2wr_send_req send;
-	struct c2wr_send_req send_se;
-	struct c2wr_send_req send_inv;
-	struct c2wr_send_req send_se_inv;
-	struct c2wr_rdma_write_req rdma_write;
-	struct c2wr_rdma_read_req rdma_read;
-	struct c2wr_mw_bind_req mw_bind;
-	struct c2wr_nsmr_fastreg_req nsmr_fastreg;
-	struct c2wr_stag_invalidate_req stag_inv;
-} __attribute__((packed));
-
-
-/*
- * RQ WRs
- */
-struct c2wr_rqwr {
-	struct c2_rq_hdr rq_hdr;
-	u8 data[0];		/* array of SGEs */
-} __attribute__((packed));
-
-union c2wr_recv {
-	struct c2wr_rqwr req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-/*
- * All AEs start with this header.  Most AEs only need to convey the
- * information in the header.  Some, like LLP connection events, need
- * more info.  The union typedef c2wr_ae_t has all the possible AEs.
- *
- * hdr.context is the user_context from the rnic_open WR.  NULL if this
- * is not affiliated with an rnic
- *
- * hdr.id is the AE identifier (eg;  CCAE_REMOTE_SHUTDOWN,
- * CCAE_LLP_CLOSE_COMPLETE)
- *
- * resource_type is one of:  C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
- *
- * user_context is the context passed down when the host created the resource.
- */
-struct c2wr_ae_hdr {
-	struct c2wr_hdr hdr;
-	u64 user_context;	/* user context for this res. */
-	__be32 resource_type;	/* see enum c2_resource_indicator */
-	__be32 resource;	/* handle for resource */
-	__be32 qp_state;	/* current QP State */
-} __attribute__((packed));
-
-/*
- * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
- * the adapter moves the QP into RTS state
- */
-struct c2wr_ae_active_connect_results {
-	struct c2wr_ae_hdr ae_hdr;
-	__be32 laddr;
-	__be32 raddr;
-	__be16 lport;
-	__be16 rport;
-	__be32 private_data_length;
-	u8 private_data[0];	/* data is in-line in the msg. */
-} __attribute__((packed));
-
-/*
- * When connections are established by the stack (and the private data
- * MPA frame is received), the adapter will generate an event to the host.
- * The details of the connection, any private data, and the new connection
- * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the
- * AE queue:
- */
-struct c2wr_ae_connection_request {
-	struct c2wr_ae_hdr ae_hdr;
-	u32 cr_handle;		/* connreq handle (sock ptr) */
-	__be32 laddr;
-	__be32 raddr;
-	__be16 lport;
-	__be16 rport;
-	__be32 private_data_length;
-	u8 private_data[0];	/* data is in-line in the msg. */
-} __attribute__((packed));
-
-union c2wr_ae {
-	struct c2wr_ae_hdr ae_generic;
-	struct c2wr_ae_active_connect_results ae_active_connect_results;
-	struct c2wr_ae_connection_request ae_connection_request;
-} __attribute__((packed));
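
/*
 * Editor's sketch (not part of the original file): picking apart an async
 * event using the header documented above.  The real AEQ dispatcher lived
 * elsewhere in the driver and is not shown in this diff; this assumes the
 * CCAE_* event ids named in the comments above are in scope.  Names
 * prefixed "example_" are hypothetical.
 */
static void example_handle_ae(union c2wr_ae *ae)
{
	struct c2wr_ae_hdr *hdr = &ae->ae_generic;

	switch (hdr->hdr.id) {
	case CCAE_ACTIVE_CONNECT_RESULTS: {
		/* Active connect finished; success/failure is carried in
		 * hdr->hdr.result. */
		struct c2wr_ae_active_connect_results *res =
			&ae->ae_active_connect_results;
		pr_debug("connect results: lport %u rport %u, %u bytes pdata\n",
			 be16_to_cpu(res->lport), be16_to_cpu(res->rport),
			 be32_to_cpu(res->private_data_length));
		break;
	}
	case CCAE_CONNECTION_REQUEST: {
		/* Passive side: cr_handle is what a later CCWR_CR_ACCEPT or
		 * CCWR_CR_REJECT request refers back to. */
		struct c2wr_ae_connection_request *req =
			&ae->ae_connection_request;
		pr_debug("incoming connection, cr_handle 0x%x\n",
			 req->cr_handle);
		break;
	}
	default:
		/* Most events only need the header: which resource
		 * (C2_RES_IND_QP/CQ/SRQ) and the current qp_state. */
		pr_debug("AE id %u, resource type %u, qp_state %u\n",
			 hdr->hdr.id, be32_to_cpu(hdr->resource_type),
			 be32_to_cpu(hdr->qp_state));
		break;
	}
}
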
-
-struct c2wr_init_req {
-	struct c2wr_hdr hdr;
-	__be64 hint_count;
-	__be64 q0_host_shared;
-	__be64 q1_host_shared;
-	__be64 q1_host_msg_pool;
-	__be64 q2_host_shared;
-	__be64 q2_host_msg_pool;
-} __attribute__((packed));
-
-struct c2wr_init_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_init {
-	struct c2wr_init_req req;
-	struct c2wr_init_rep rep;
-} __attribute__((packed));
-
-/*
- * For upgrading flash.
- */
-
-struct c2wr_flash_init_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-} __attribute__((packed));
-
-struct c2wr_flash_init_rep {
-	struct c2wr_hdr hdr;
-	u32 adapter_flash_buf_offset;
-	u32 adapter_flash_len;
-} __attribute__((packed));
-
-union c2wr_flash_init {
-	struct c2wr_flash_init_req req;
-	struct c2wr_flash_init_rep rep;
-} __attribute__((packed));
-
-struct c2wr_flash_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 len;
-} __attribute__((packed));
-
-struct c2wr_flash_rep {
-	struct c2wr_hdr hdr;
-	u32 status;
-} __attribute__((packed));
-
-union c2wr_flash {
-	struct c2wr_flash_req req;
-	struct c2wr_flash_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 size;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_rep {
-	struct c2wr_hdr hdr;
-	u32 offset;		/* 0 if mem not available */
-	u32 size;		/* 0 if mem not available */
-} __attribute__((packed));
-
-union c2wr_buf_alloc {
-	struct c2wr_buf_alloc_req req;
-	struct c2wr_buf_alloc_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_free_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 offset;		/* Must match value from alloc */
-	u32 size;		/* Must match value from alloc */
-} __attribute__((packed));
-
-struct c2wr_buf_free_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_buf_free {
-	struct c2wr_buf_free_req req;
-	struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_flash_write_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 offset;
-	u32 size;
-	u32 type;
-	u32 flags;
-} __attribute__((packed));
-
-struct c2wr_flash_write_rep {
-	struct c2wr_hdr hdr;
-	u32 status;
-} __attribute__((packed));
-
-union c2wr_flash_write {
-	struct c2wr_flash_write_req req;
-	struct c2wr_flash_write_rep rep;
-} __attribute__((packed));
-
-/*
- * Messages for LLP connection setup.
- */
-
-/*
- * Listen Request.  This allocates a listening endpoint to allow passive
- * connection setup.  Newly established LLP connections are passed up
- * via an AE.  See c2wr_ae_connection_request_t
- */
-struct c2wr_ep_listen_create_req {
-	struct c2wr_hdr hdr;
-	u64 user_context;	/* returned in AEs. */
-	u32 rnic_handle;
-	__be32 local_addr;		/* local addr, or 0  */
-	__be16 local_port;		/* 0 means "pick one" */
-	u16 pad;
-	__be32 backlog;		/* traditional TCP listen backlog */
-} __attribute__((packed));
-
-struct c2wr_ep_listen_create_rep {
-	struct c2wr_hdr hdr;
-	u32 ep_handle;		/* handle to new listening ep */
-	u16 local_port;		/* resulting port... */
-	u16 pad;
-} __attribute__((packed));
-
-union c2wr_ep_listen_create {
-	struct c2wr_ep_listen_create_req req;
-	struct c2wr_ep_listen_create_rep rep;
-} __attribute__((packed));
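
/*
 * Editor's sketch (not part of the original file): filling in a listen
 * request per the comment above.  The message is assumed to have been
 * zeroed by the caller; 0 means "any local address" / "let the adapter
 * pick a port", as the field comments indicate.  Posting and reply
 * handling follow the usual verbs request pattern and are omitted.
 * Names prefixed "example_" are hypothetical.
 */
static void example_fill_listen(struct c2wr_ep_listen_create_req *wr,
				struct c2_dev *c2dev, u64 user_context,
				__be32 laddr, __be16 lport, u32 backlog)
{
	c2_wr_set_id(wr, CCWR_EP_LISTEN_CREATE);
	wr->user_context = user_context;	/* returned in later AEs */
	wr->rnic_handle = c2dev->adapter_handle;
	wr->local_addr = laddr;
	wr->local_port = lport;
	wr->backlog = cpu_to_be32(backlog);
}
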
-
-struct c2wr_ep_listen_destroy_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_ep_listen_destroy {
-	struct c2wr_ep_listen_destroy_req req;
-	struct c2wr_ep_listen_destroy_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_query_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_query_rep {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 local_addr;
-	u32 remote_addr;
-	u16 local_port;
-	u16 remote_port;
-} __attribute__((packed));
-
-union c2wr_ep_query {
-	struct c2wr_ep_query_req req;
-	struct c2wr_ep_query_rep rep;
-} __attribute__((packed));
-
-
-/*
- * The host passes this down to indicate acceptance of a pending iWARP
- * connection.  The cr_handle was obtained from the CONNECTION_REQUEST
- * AE passed up by the adapter.  See c2wr_ae_connection_request_t.
- */
-struct c2wr_cr_accept_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 qp_handle;		/* QP to bind to this LLP conn */
-	u32 ep_handle;		/* LLP  handle to accept */
-	__be32 private_data_length;
-	u8 private_data[0];	/* data in-line in msg. */
-} __attribute__((packed));
-
-/*
- * adapter sends reply when private data is successfully submitted to
- * the LLP.
- */
-struct c2wr_cr_accept_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_accept {
-	struct c2wr_cr_accept_req req;
-	struct c2wr_cr_accept_rep rep;
-} __attribute__((packed));
-
-/*
- * The host sends this down if a given iWARP connection request was
- * rejected by the consumer.  The cr_handle was obtained from a
- * previous c2wr_ae_connection_request_t AE sent by the adapter.
- */
-struct  c2wr_cr_reject_req {
-	struct c2wr_hdr hdr;
-	u32 rnic_handle;
-	u32 ep_handle;		/* LLP handle to reject */
-} __attribute__((packed));
-
-/*
- * Dunno if this is needed, but we'll add it for now.  The adapter will
- * send the reject_reply after the LLP endpoint has been destroyed.
- */
-struct  c2wr_cr_reject_rep {
-	struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_reject {
-	struct c2wr_cr_reject_req req;
-	struct c2wr_cr_reject_rep rep;
-} __attribute__((packed));
-
-/*
- * console command.  Used to implement a debug console over the verbs
- * request and reply queues.
- */
-
-/*
- * Console request message.  It contains:
- *	- message hdr with id = CCWR_CONSOLE
- *	- the physaddr/len of host memory to be used for the reply.
- *	- the command string.  eg:  "netstat -s" or "zoneinfo"
- */
-struct c2wr_console_req {
-	struct c2wr_hdr hdr;		/* id = CCWR_CONSOLE */
-	u64 reply_buf;		/* pinned host buf for reply */
-	u32 reply_buf_len;	/* length of reply buffer */
-	u8 command[0];		/* NUL terminated ascii string */
-	/* containing the command req */
-} __attribute__((packed));
-
-/*
- * flags used in the console reply.
- */
-enum c2_console_flags {
-	CONS_REPLY_TRUNCATED = 0x00000001	/* reply was truncated */
-} __attribute__((packed));
-
-/*
- * Console reply message.
- * hdr.result contains the c2_status_t error if the reply was _not_ generated,
- * or C2_OK if the reply was generated.
- */
-struct c2wr_console_rep {
-	struct c2wr_hdr hdr;		/* id = CCWR_CONSOLE */
-	u32 flags;
-} __attribute__((packed));
-
-union c2wr_console {
-	struct c2wr_console_req req;
-	struct c2wr_console_rep rep;
-} __attribute__((packed));
-
-
-/*
- * Giant union with all WRs.  Makes life easier...
- */
-union c2wr {
-	struct c2wr_hdr hdr;
-	struct c2wr_user_hdr user_hdr;
-	union c2wr_rnic_open rnic_open;
-	union c2wr_rnic_query rnic_query;
-	union c2wr_rnic_getconfig rnic_getconfig;
-	union c2wr_rnic_setconfig rnic_setconfig;
-	union c2wr_rnic_close rnic_close;
-	union c2wr_cq_create cq_create;
-	union c2wr_cq_modify cq_modify;
-	union c2wr_cq_destroy cq_destroy;
-	union c2wr_pd_alloc pd_alloc;
-	union c2wr_pd_dealloc pd_dealloc;
-	union c2wr_srq_create srq_create;
-	union c2wr_srq_destroy srq_destroy;
-	union c2wr_qp_create qp_create;
-	union c2wr_qp_query qp_query;
-	union c2wr_qp_modify qp_modify;
-	union c2wr_qp_destroy qp_destroy;
-	struct c2wr_qp_connect qp_connect;
-	union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
-	union c2wr_nsmr_register nsmr_register;
-	union c2wr_nsmr_pbl nsmr_pbl;
-	union c2wr_mr_query mr_query;
-	union c2wr_mw_query mw_query;
-	union c2wr_stag_dealloc stag_dealloc;
-	union c2wr_sqwr sqwr;
-	struct c2wr_rqwr rqwr;
-	struct c2wr_ce ce;
-	union c2wr_ae ae;
-	union c2wr_init init;
-	union c2wr_ep_listen_create ep_listen_create;
-	union c2wr_ep_listen_destroy ep_listen_destroy;
-	union c2wr_cr_accept cr_accept;
-	union c2wr_cr_reject cr_reject;
-	union c2wr_console console;
-	union c2wr_flash_init flash_init;
-	union c2wr_flash flash;
-	union c2wr_buf_alloc buf_alloc;
-	union c2wr_buf_free buf_free;
-	union c2wr_flash_write flash_write;
-} __attribute__((packed));
-
-
-/*
- * Accessors for the wr fields that are packed together tightly to
- * reduce the wr message size.  The wr arguments are void* so that
- * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
- * in the struct c2wr union can be passed in.
- */
-static __inline__ u8 c2_wr_get_id(void *wr)
-{
-	return ((struct c2wr_hdr *) wr)->id;
-}
-static __inline__ void c2_wr_set_id(void *wr, u8 id)
-{
-	((struct c2wr_hdr *) wr)->id = id;
-}
-static __inline__ u8 c2_wr_get_result(void *wr)
-{
-	return ((struct c2wr_hdr *) wr)->result;
-}
-static __inline__ void c2_wr_set_result(void *wr, u8 result)
-{
-	((struct c2wr_hdr *) wr)->result = result;
-}
-static __inline__ u8 c2_wr_get_flags(void *wr)
-{
-	return ((struct c2wr_hdr *) wr)->flags;
-}
-static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
-{
-	((struct c2wr_hdr *) wr)->flags = flags;
-}
-static __inline__ u8 c2_wr_get_sge_count(void *wr)
-{
-	return ((struct c2wr_hdr *) wr)->sge_count;
-}
-static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
-{
-	((struct c2wr_hdr *) wr)->sge_count = sge_count;
-}
-static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
-{
-	return ((struct c2wr_hdr *) wr)->wqe_count;
-}
-static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
-{
-	((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
-}
-
-#endif				/* _C2_WR_H_ */
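A minimal sketch of how the packed-header accessors above would typically be used when building a message for the verbs request queue. CCWR_CONSOLE, struct c2wr_console_req and the accessor helpers come from c2_wr.h itself; the surrounding allocation and the actual queue-posting step are assumptions and are not shown:

	/* Assumes the declarations from c2_wr.h above are in scope; the caller
	 * must have allocated sizeof(*req) + strlen(cmd) + 1 bytes. */
	static void fill_console_req(struct c2wr_console_req *req,
				     u64 reply_buf_pa, u32 reply_buf_len,
				     const char *cmd)
	{
		c2_wr_set_id(req, CCWR_CONSOLE);   /* ok: hdr is the first member */
		c2_wr_set_sge_count(req, 0);       /* no scatter/gather in this example */
		req->reply_buf = reply_buf_pa;     /* pinned host buffer for the reply */
		req->reply_buf_len = reply_buf_len;
		strcpy((char *)req->command, cmd); /* NUL-terminated, in-line in msg */
	}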
diff --git a/drivers/staging/rdma/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
deleted file mode 100644
index 3fadd2a..0000000
--- a/drivers/staging/rdma/ehca/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config INFINIBAND_EHCA
-	tristate "eHCA support"
-	depends on IBMEBUS
-	---help---
-	This driver supports the deprecated IBM pSeries eHCA InfiniBand
-	adapter.
-
-	To compile the driver as a module, choose M here. The module
-	will be called ib_ehca.
-
diff --git a/drivers/staging/rdma/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
deleted file mode 100644
index 74d284e..0000000
--- a/drivers/staging/rdma/ehca/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-#  Authors: Heiko J Schick <schickhj@de.ibm.com>
-#           Christoph Raisch <raisch@de.ibm.com>
-#           Joachim Fenkes <fenkes@de.ibm.com>
-#
-#  Copyright (c) 2005 IBM Corporation
-#
-#  All rights reserved.
-#
-#  This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
-
-obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
-
-ib_ehca-objs  = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
-		ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
-		ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
-
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
deleted file mode 100644
index 199a4a6..0000000
--- a/drivers/staging/rdma/ehca/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-9/2015
-
-The ehca driver has been deprecated and moved to drivers/staging/rdma.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
deleted file mode 100644
index 94e088c..0000000
--- a/drivers/staging/rdma/ehca/ehca_av.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  address vector functions
- *
- *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Khadija Souissi <souissik@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *av_cache;
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
-		  enum ib_rate path_rate, u32 *ipd)
-{
-	int path = ib_rate_to_mult(path_rate);
-	int link, ret;
-	struct ib_port_attr pa;
-
-	if (path_rate == IB_RATE_PORT_CURRENT) {
-		*ipd = 0;
-		return 0;
-	}
-
-	if (unlikely(path < 0)) {
-		ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
-			 path_rate);
-		return -EINVAL;
-	}
-
-	ret = ehca_query_port(&shca->ib_device, port, &pa);
-	if (unlikely(ret < 0)) {
-		ehca_err(&shca->ib_device, "Failed to query port  ret=%i", ret);
-		return ret;
-	}
-
-	link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
-
-	if (path >= link)
-		/* no need to throttle if path faster than link */
-		*ipd = 0;
-	else
-		/* IPD = round((link / path) - 1) */
-		*ipd = ((link + (path >> 1)) / path) - 1;
-
-	return 0;
-}
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
-	int ret;
-	struct ehca_av *av;
-	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-					      ib_device);
-
-	av = kmem_cache_alloc(av_cache, GFP_KERNEL);
-	if (!av) {
-		ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
-			 pd, ah_attr);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	av->av.sl = ah_attr->sl;
-	av->av.dlid = ah_attr->dlid;
-	av->av.slid_path_bits = ah_attr->src_path_bits;
-
-	if (ehca_static_rate < 0) {
-		u32 ipd;
-
-		if (ehca_calc_ipd(shca, ah_attr->port_num,
-				  ah_attr->static_rate, &ipd)) {
-			ret = -EINVAL;
-			goto create_ah_exit1;
-		}
-		av->av.ipd = ipd;
-	} else
-		av->av.ipd = ehca_static_rate;
-
-	av->av.lnh = ah_attr->ah_flags;
-	av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
-	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
-					    ah_attr->grh.traffic_class);
-	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
-					    ah_attr->grh.flow_label);
-	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
-					    ah_attr->grh.hop_limit);
-	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
-	/* set sgid in grh.word_1 */
-	if (ah_attr->ah_flags & IB_AH_GRH) {
-		int rc;
-		struct ib_port_attr port_attr;
-		union ib_gid gid;
-
-		memset(&port_attr, 0, sizeof(port_attr));
-		rc = ehca_query_port(pd->device, ah_attr->port_num,
-				     &port_attr);
-		if (rc) { /* invalid port number */
-			ret = -EINVAL;
-			ehca_err(pd->device, "Invalid port number "
-				 "ehca_query_port() returned %x "
-				 "pd=%p ah_attr=%p", rc, pd, ah_attr);
-			goto create_ah_exit1;
-		}
-		memset(&gid, 0, sizeof(gid));
-		rc = ehca_query_gid(pd->device,
-				    ah_attr->port_num,
-				    ah_attr->grh.sgid_index, &gid);
-		if (rc) {
-			ret = -EINVAL;
-			ehca_err(pd->device, "Failed to retrieve sgid "
-				 "ehca_query_gid() returned %x "
-				 "pd=%p ah_attr=%p", rc, pd, ah_attr);
-			goto create_ah_exit1;
-		}
-		memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
-	}
-	av->av.pmtu = shca->max_mtu;
-
-	/* dgid comes in grh.word_3 */
-	memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
-	       sizeof(ah_attr->grh.dgid));
-
-	return &av->ib_ah;
-
-create_ah_exit1:
-	kmem_cache_free(av_cache, av);
-
-	return ERR_PTR(ret);
-}
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
-	struct ehca_av *av;
-	struct ehca_ud_av new_ehca_av;
-	struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
-					      ib_device);
-
-	memset(&new_ehca_av, 0, sizeof(new_ehca_av));
-	new_ehca_av.sl = ah_attr->sl;
-	new_ehca_av.dlid = ah_attr->dlid;
-	new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
-	new_ehca_av.ipd = ah_attr->static_rate;
-	new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
-					 (ah_attr->ah_flags & IB_AH_GRH) > 0);
-	new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
-						ah_attr->grh.traffic_class);
-	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
-						 ah_attr->grh.flow_label);
-	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
-						 ah_attr->grh.hop_limit);
-	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
-
-	/* set sgid in grh.word_1 */
-	if (ah_attr->ah_flags & IB_AH_GRH) {
-		int rc;
-		struct ib_port_attr port_attr;
-		union ib_gid gid;
-
-		memset(&port_attr, 0, sizeof(port_attr));
-		rc = ehca_query_port(ah->device, ah_attr->port_num,
-				     &port_attr);
-		if (rc) { /* invalid port number */
-			ehca_err(ah->device, "Invalid port number "
-				 "ehca_query_port() returned %x "
-				 "ah=%p ah_attr=%p port_num=%x",
-				 rc, ah, ah_attr, ah_attr->port_num);
-			return -EINVAL;
-		}
-		memset(&gid, 0, sizeof(gid));
-		rc = ehca_query_gid(ah->device,
-				    ah_attr->port_num,
-				    ah_attr->grh.sgid_index, &gid);
-		if (rc) {
-			ehca_err(ah->device, "Failed to retrieve sgid "
-				 "ehca_query_gid() returned %x "
-				 "ah=%p ah_attr=%p port_num=%x "
-				 "sgid_index=%x",
-				 rc, ah, ah_attr, ah_attr->port_num,
-				 ah_attr->grh.sgid_index);
-			return -EINVAL;
-		}
-		memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
-	}
-
-	new_ehca_av.pmtu = shca->max_mtu;
-
-	memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
-	       sizeof(ah_attr->grh.dgid));
-
-	av = container_of(ah, struct ehca_av, ib_ah);
-	av->av = new_ehca_av;
-
-	return 0;
-}
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
-	struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-
-	memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
-	       sizeof(ah_attr->grh.dgid));
-	ah_attr->sl = av->av.sl;
-
-	ah_attr->dlid = av->av.dlid;
-
-	ah_attr->src_path_bits = av->av.slid_path_bits;
-	ah_attr->static_rate = av->av.ipd;
-	ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
-	ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
-						    av->av.grh.word_0);
-	ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
-						av->av.grh.word_0);
-	ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
-						 av->av.grh.word_0);
-
-	return 0;
-}
-
-int ehca_destroy_ah(struct ib_ah *ah)
-{
-	kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
-
-	return 0;
-}
-
-int ehca_init_av_cache(void)
-{
-	av_cache = kmem_cache_create("ehca_cache_av",
-				   sizeof(struct ehca_av), 0,
-				   SLAB_HWCACHE_ALIGN,
-				   NULL);
-	if (!av_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void ehca_cleanup_av_cache(void)
-{
-	kmem_cache_destroy(av_cache);
-}
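ehca_calc_ipd() above encodes the static-rate throttle as IPD = round(link/path) - 1, using the usual add-half-then-divide integer rounding. A standalone arithmetic check of that expression; the multipliers assume the values ib_width_enum_to_int() and ib_rate_to_mult() would return for a 4X DDR link and a 5 Gb/s static rate:

	#include <stdio.h>

	/* Same rounding as in ehca_calc_ipd(): IPD = round(link / path) - 1 */
	static unsigned int calc_ipd(unsigned int link, unsigned int path)
	{
		return ((link + (path >> 1)) / path) - 1;
	}

	int main(void)
	{
		/* 4X width (4) * DDR speed (2) = 8, throttled to a 2x static rate */
		printf("%u\n", calc_ipd(8, 2));	/* prints 3, i.e. round(8/2) - 1 */
		return 0;
	}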
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
deleted file mode 100644
index e8c3387..0000000
--- a/drivers/staging/rdma/ehca/ehca_classes.h
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Struct definition for eHCA internal structures
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_H__
-#define __EHCA_CLASSES_H__
-
-struct ehca_module;
-struct ehca_qp;
-struct ehca_cq;
-struct ehca_eq;
-struct ehca_mr;
-struct ehca_mw;
-struct ehca_pd;
-struct ehca_av;
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-
-#ifdef CONFIG_PPC64
-#include "ehca_classes_pSeries.h"
-#endif
-#include "ipz_pt_fn.h"
-#include "ehca_qes.h"
-#include "ehca_irq.h"
-
-#define EHCA_EQE_CACHE_SIZE 20
-#define EHCA_MAX_NUM_QUEUES 0xffff
-
-struct ehca_eqe_cache_entry {
-	struct ehca_eqe *eqe;
-	struct ehca_cq *cq;
-};
-
-struct ehca_eq {
-	u32 length;
-	struct ipz_queue ipz_queue;
-	struct ipz_eq_handle ipz_eq_handle;
-	struct work_struct work;
-	struct h_galpas galpas;
-	int is_initialized;
-	struct ehca_pfeq pf;
-	spinlock_t spinlock;
-	struct tasklet_struct interrupt_task;
-	u32 ist;
-	spinlock_t irq_spinlock;
-	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
-};
-
-struct ehca_sma_attr {
-	u16 lid, lmc, sm_sl, sm_lid;
-	u16 pkey_tbl_len, pkeys[16];
-};
-
-struct ehca_sport {
-	struct ib_cq *ibcq_aqp1;
-	struct ib_qp *ibqp_sqp[2];
-	/* lock to serialize modify_qp() calls for sqp in normal
-	 * and irq path (when event PORT_ACTIVE is received first time)
-	 */
-	spinlock_t mod_sqp_lock;
-	enum ib_port_state port_state;
-	struct ehca_sma_attr saved_attr;
-	u32 pma_qp_nr;
-};
-
-#define HCA_CAP_MR_PGSIZE_4K  0x80000000
-#define HCA_CAP_MR_PGSIZE_64K 0x40000000
-#define HCA_CAP_MR_PGSIZE_1M  0x20000000
-#define HCA_CAP_MR_PGSIZE_16M 0x10000000
-
-struct ehca_shca {
-	struct ib_device ib_device;
-	struct platform_device *ofdev;
-	u8 num_ports;
-	int hw_level;
-	struct list_head shca_list;
-	struct ipz_adapter_handle ipz_hca_handle;
-	struct ehca_sport sport[2];
-	struct ehca_eq eq;
-	struct ehca_eq neq;
-	struct ehca_mr *maxmr;
-	struct ehca_pd *pd;
-	struct h_galpas galpas;
-	struct mutex modify_mutex;
-	u64 hca_cap;
-	/* MR pgsize: bits 0-3 mean 4K, 64K, 1M, 16M respectively */
-	u32 hca_cap_mr_pgsize;
-	int max_mtu;
-	int max_num_qps;
-	int max_num_cqs;
-	atomic_t num_cqs;
-	atomic_t num_qps;
-};
-
-struct ehca_pd {
-	struct ib_pd ib_pd;
-	struct ipz_pd fw_pd;
-	/* small queue mgmt */
-	struct mutex lock;
-	struct list_head free[2];
-	struct list_head full[2];
-};
-
-enum ehca_ext_qp_type {
-	EQPT_NORMAL    = 0,
-	EQPT_LLQP      = 1,
-	EQPT_SRQBASE   = 2,
-	EQPT_SRQ       = 3,
-};
-
-/* struct to cache modify_qp()'s parms for GSI/SMI qp */
-struct ehca_mod_qp_parm {
-	int mask;
-	struct ib_qp_attr attr;
-};
-
-#define EHCA_MOD_QP_PARM_MAX 4
-
-#define QMAP_IDX_MASK 0xFFFFULL
-
-/* struct for tracking if cqes have been reported to the application */
-struct ehca_qmap_entry {
-	u16 app_wr_id;
-	u8 reported;
-	u8 cqe_req;
-};
-
-struct ehca_queue_map {
-	struct ehca_qmap_entry *map;
-	unsigned int entries;
-	unsigned int tail;
-	unsigned int left_to_poll;
-	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
-};
-
-/* function to calculate the next index for the qmap */
-static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
-{
-	unsigned int temp = cur_index + 1;
-	return (temp == limit) ? 0 : temp;
-}
-
-struct ehca_qp {
-	union {
-		struct ib_qp ib_qp;
-		struct ib_srq ib_srq;
-	};
-	u32 qp_type;
-	enum ehca_ext_qp_type ext_type;
-	enum ib_qp_state state;
-	struct ipz_queue ipz_squeue;
-	struct ehca_queue_map sq_map;
-	struct ipz_queue ipz_rqueue;
-	struct ehca_queue_map rq_map;
-	struct h_galpas galpas;
-	u32 qkey;
-	u32 real_qp_num;
-	u32 token;
-	spinlock_t spinlock_s;
-	spinlock_t spinlock_r;
-	u32 sq_max_inline_data_size;
-	struct ipz_qp_handle ipz_qp_handle;
-	struct ehca_pfqp pf;
-	struct ib_qp_init_attr init_attr;
-	struct ehca_cq *send_cq;
-	struct ehca_cq *recv_cq;
-	unsigned int sqerr_purgeflag;
-	struct hlist_node list_entries;
-	/* array to cache modify_qp()'s parms for GSI/SMI qp */
-	struct ehca_mod_qp_parm *mod_qp_parm;
-	int mod_qp_parm_idx;
-	/* mmap counter for resources mapped into user space */
-	u32 mm_count_squeue;
-	u32 mm_count_rqueue;
-	u32 mm_count_galpa;
-	/* unsolicited ack circumvention */
-	int unsol_ack_circ;
-	int mtu_shift;
-	u32 message_count;
-	u32 packet_count;
-	atomic_t nr_events; /* events seen */
-	wait_queue_head_t wait_completion;
-	int mig_armed;
-	struct list_head sq_err_node;
-	struct list_head rq_err_node;
-};
-
-#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
-#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
-#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
-
-/* must be power of 2 */
-#define QP_HASHTAB_LEN 8
-
-struct ehca_cq {
-	struct ib_cq ib_cq;
-	struct ipz_queue ipz_queue;
-	struct h_galpas galpas;
-	spinlock_t spinlock;
-	u32 cq_number;
-	u32 token;
-	u32 nr_of_entries;
-	struct ipz_cq_handle ipz_cq_handle;
-	struct ehca_pfcq pf;
-	spinlock_t cb_lock;
-	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
-	struct list_head entry;
-	u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
-	atomic_t nr_events; /* #events seen */
-	wait_queue_head_t wait_completion;
-	spinlock_t task_lock;
-	/* mmap counter for resources mapped into user space */
-	u32 mm_count_queue;
-	u32 mm_count_galpa;
-	struct list_head sqp_err_list;
-	struct list_head rqp_err_list;
-};
-
-enum ehca_mr_flag {
-	EHCA_MR_FLAG_FMR = 0x80000000,	 /* FMR, created with ehca_alloc_fmr */
-	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR                           */
-};
-
-struct ehca_mr {
-	union {
-		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
-		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
-	} ib;
-	struct ib_umem *umem;
-	spinlock_t mrlock;
-
-	enum ehca_mr_flag flags;
-	u32 num_kpages;		/* number of kernel pages */
-	u32 num_hwpages;	/* number of hw pages to form MR */
-	u64 hwpage_size;	/* hw page size used for this MR */
-	int acl;		/* ACL (stored here for usage in reregister) */
-	u64 *start;		/* virtual start address (stored here for */
-				/* usage in reregister) */
-	u64 size;		/* size (stored here for usage in reregister) */
-	u32 fmr_page_size;	/* page size for FMR */
-	u32 fmr_max_pages;	/* max pages for FMR */
-	u32 fmr_max_maps;	/* max outstanding maps for FMR */
-	u32 fmr_map_cnt;	/* map counter for FMR */
-	/* fw specific data */
-	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
-	struct h_galpas galpas;
-};
-
-struct ehca_mw {
-	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
-	spinlock_t mwlock;
-
-	u8 never_bound;		/* indication MW was never bound */
-	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
-	struct h_galpas galpas;
-};
-
-enum ehca_mr_pgi_type {
-	EHCA_MR_PGI_PHYS   = 1,  /* type of ehca_reg_phys_mr,
-				  * ehca_rereg_phys_mr,
-				  * ehca_reg_internal_maxmr */
-	EHCA_MR_PGI_USER   = 2,  /* type of ehca_reg_user_mr */
-	EHCA_MR_PGI_FMR    = 3   /* type of ehca_map_phys_fmr */
-};
-
-struct ehca_mr_pginfo {
-	enum ehca_mr_pgi_type type;
-	u64 num_kpages;
-	u64 kpage_cnt;
-	u64 hwpage_size;     /* hw page size used for this MR */
-	u64 num_hwpages;     /* number of hw pages */
-	u64 hwpage_cnt;      /* counter for hw pages */
-	u64 next_hwpage;     /* next hw page in buffer/chunk/listelem */
-
-	union {
-		struct { /* type EHCA_MR_PGI_PHYS section */
-			u64 addr;
-			u16 size;
-		} phy;
-		struct { /* type EHCA_MR_PGI_USER section */
-			struct ib_umem *region;
-			struct scatterlist *next_sg;
-			u64 next_nmap;
-		} usr;
-		struct { /* type EHCA_MR_PGI_FMR section */
-			u64 fmr_pgsize;
-			u64 *page_list;
-			u64 next_listelem;
-		} fmr;
-	} u;
-};
-
-/* output parameters for MR/FMR hipz calls */
-struct ehca_mr_hipzout_parms {
-	struct ipz_mrmw_handle handle;
-	u32 lkey;
-	u32 rkey;
-	u64 len;
-	u64 vaddr;
-	u32 acl;
-};
-
-/* output parameters for MW hipz calls */
-struct ehca_mw_hipzout_parms {
-	struct ipz_mrmw_handle handle;
-	u32 rkey;
-};
-
-struct ehca_av {
-	struct ib_ah ib_ah;
-	struct ehca_ud_av av;
-};
-
-struct ehca_ucontext {
-	struct ib_ucontext ib_ucontext;
-};
-
-int ehca_init_pd_cache(void);
-void ehca_cleanup_pd_cache(void);
-int ehca_init_cq_cache(void);
-void ehca_cleanup_cq_cache(void);
-int ehca_init_qp_cache(void);
-void ehca_cleanup_qp_cache(void);
-int ehca_init_av_cache(void);
-void ehca_cleanup_av_cache(void);
-int ehca_init_mrmw_cache(void);
-void ehca_cleanup_mrmw_cache(void);
-int ehca_init_small_qp_cache(void);
-void ehca_cleanup_small_qp_cache(void);
-
-extern rwlock_t ehca_qp_idr_lock;
-extern rwlock_t ehca_cq_idr_lock;
-extern struct idr ehca_qp_idr;
-extern struct idr ehca_cq_idr;
-extern spinlock_t shca_list_lock;
-
-extern int ehca_static_rate;
-extern int ehca_port_act_time;
-extern bool ehca_use_hp_mr;
-extern bool ehca_scaling_code;
-extern int ehca_lock_hcalls;
-extern int ehca_nr_ports;
-extern int ehca_max_cq;
-extern int ehca_max_qp;
-
-struct ipzu_queue_resp {
-	u32 qe_size;      /* queue entry size */
-	u32 act_nr_of_sg;
-	u32 queue_length; /* queue length allocated in bytes */
-	u32 pagesize;
-	u32 toggle_state;
-	u32 offset; /* save offset within a page for small_qp */
-};
-
-struct ehca_create_cq_resp {
-	u32 cq_number;
-	u32 token;
-	struct ipzu_queue_resp ipz_queue;
-	u32 fw_handle_ofs;
-	u32 dummy;
-};
-
-struct ehca_create_qp_resp {
-	u32 qp_num;
-	u32 token;
-	u32 qp_type;
-	u32 ext_type;
-	u32 qkey;
-	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
-	u32 real_qp_num;
-	u32 fw_handle_ofs;
-	u32 dummy;
-	struct ipzu_queue_resp ipz_squeue;
-	struct ipzu_queue_resp ipz_rqueue;
-};
-
-struct ehca_alloc_cq_parms {
-	u32 nr_cqe;
-	u32 act_nr_of_entries;
-	u32 act_pages;
-	struct ipz_eq_handle eq_handle;
-};
-
-enum ehca_service_type {
-	ST_RC  = 0,
-	ST_UC  = 1,
-	ST_RD  = 2,
-	ST_UD  = 3,
-};
-
-enum ehca_ll_comp_flags {
-	LLQP_SEND_COMP = 0x20,
-	LLQP_RECV_COMP = 0x40,
-	LLQP_COMP_MASK = 0x60,
-};
-
-struct ehca_alloc_queue_parms {
-	/* input parameters */
-	int max_wr;
-	int max_sge;
-	int page_size;
-	int is_small;
-
-	/* output parameters */
-	u16 act_nr_wqes;
-	u8  act_nr_sges;
-	u32 queue_size; /* bytes for small queues, pages otherwise */
-};
-
-struct ehca_alloc_qp_parms {
-	struct ehca_alloc_queue_parms squeue;
-	struct ehca_alloc_queue_parms rqueue;
-
-	/* input parameters */
-	enum ehca_service_type servicetype;
-	int qp_storage;
-	int sigtype;
-	enum ehca_ext_qp_type ext_type;
-	enum ehca_ll_comp_flags ll_comp_flags;
-	int ud_av_l_key_ctl;
-
-	u32 token;
-	struct ipz_eq_handle eq_handle;
-	struct ipz_pd pd;
-	struct ipz_cq_handle send_cq_handle, recv_cq_handle;
-
-	u32 srq_qpn, srq_token, srq_limit;
-
-	/* output parameters */
-	u32 real_qp_num;
-	struct ipz_qp_handle qp_handle;
-	struct h_galpas galpas;
-};
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
-
-#endif
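Two small idioms from the header above are worth spelling out: the per-CQ QP hash table works only because QP_HASHTAB_LEN is a power of two, so masking with (len - 1) is equivalent to a modulo, and next_index() wraps a queue-map index without a division. A self-contained user-space check of both (the test values are chosen arbitrarily):

	#include <assert.h>

	#define QP_HASHTAB_LEN 8	/* must be power of 2 */

	static unsigned int next_index(unsigned int cur_index, unsigned int limit)
	{
		unsigned int temp = cur_index + 1;
		return (temp == limit) ? 0 : temp;
	}

	int main(void)
	{
		/* masking equals modulo only because the length is a power of two */
		assert((0x4712u & (QP_HASHTAB_LEN - 1)) == 0x4712u % QP_HASHTAB_LEN);

		/* the queue-map index wraps back to 0 when it reaches the limit */
		assert(next_index(6, 8) == 7);
		assert(next_index(7, 8) == 0);
		return 0;
	}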
diff --git a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
deleted file mode 100644
index 689c357..0000000
--- a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  pSeries interface definitions
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_PSERIES_H__
-#define __EHCA_CLASSES_PSERIES_H__
-
-#include "hcp_phyp.h"
-#include "ipz_pt_fn.h"
-
-
-struct ehca_pfqp {
-	struct ipz_qpt sqpt;
-	struct ipz_qpt rqpt;
-};
-
-struct ehca_pfcq {
-	struct ipz_qpt qpt;
-	u32 cqnr;
-};
-
-struct ehca_pfeq {
-	struct ipz_qpt qpt;
-	struct h_galpa galpa;
-	u32 eqnr;
-};
-
-struct ipz_adapter_handle {
-	u64 handle;
-};
-
-struct ipz_cq_handle {
-	u64 handle;
-};
-
-struct ipz_eq_handle {
-	u64 handle;
-};
-
-struct ipz_qp_handle {
-	u64 handle;
-};
-struct ipz_mrmw_handle {
-	u64 handle;
-};
-
-struct ipz_pd {
-	u32 value;
-};
-
-struct hcp_modify_qp_control_block {
-	u32 qkey;                      /* 00 */
-	u32 rdd;                       /* reliable datagram domain */
-	u32 send_psn;                  /* 02 */
-	u32 receive_psn;               /* 03 */
-	u32 prim_phys_port;            /* 04 */
-	u32 alt_phys_port;             /* 05 */
-	u32 prim_p_key_idx;            /* 06 */
-	u32 alt_p_key_idx;             /* 07 */
-	u32 rdma_atomic_ctrl;          /* 08 */
-	u32 qp_state;                  /* 09 */
-	u32 reserved_10;               /* 10 */
-	u32 rdma_nr_atomic_resp_res;   /* 11 */
-	u32 path_migration_state;      /* 12 */
-	u32 rdma_atomic_outst_dest_qp; /* 13 */
-	u32 dest_qp_nr;                /* 14 */
-	u32 min_rnr_nak_timer_field;   /* 15 */
-	u32 service_level;             /* 16 */
-	u32 send_grh_flag;             /* 17 */
-	u32 retry_count;               /* 18 */
-	u32 timeout;                   /* 19 */
-	u32 path_mtu;                  /* 20 */
-	u32 max_static_rate;           /* 21 */
-	u32 dlid;                      /* 22 */
-	u32 rnr_retry_count;           /* 23 */
-	u32 source_path_bits;          /* 24 */
-	u32 traffic_class;             /* 25 */
-	u32 hop_limit;                 /* 26 */
-	u32 source_gid_idx;            /* 27 */
-	u32 flow_label;                /* 28 */
-	u32 reserved_29;               /* 29 */
-	union {                        /* 30 */
-		u64 dw[2];
-		u8 byte[16];
-	} dest_gid;
-	u32 service_level_al;          /* 34 */
-	u32 send_grh_flag_al;          /* 35 */
-	u32 retry_count_al;            /* 36 */
-	u32 timeout_al;                /* 37 */
-	u32 max_static_rate_al;        /* 38 */
-	u32 dlid_al;                   /* 39 */
-	u32 rnr_retry_count_al;        /* 40 */
-	u32 source_path_bits_al;       /* 41 */
-	u32 traffic_class_al;          /* 42 */
-	u32 hop_limit_al;              /* 43 */
-	u32 source_gid_idx_al;         /* 44 */
-	u32 flow_label_al;             /* 45 */
-	u32 reserved_46;               /* 46 */
-	u32 reserved_47;               /* 47 */
-	union {                        /* 48 */
-		u64 dw[2];
-		u8 byte[16];
-	} dest_gid_al;
-	u32 max_nr_outst_send_wr;      /* 52 */
-	u32 max_nr_outst_recv_wr;      /* 53 */
-	u32 disable_ete_credit_check;  /* 54 */
-	u32 qp_number;                 /* 55 */
-	u64 send_queue_handle;         /* 56 */
-	u64 recv_queue_handle;         /* 58 */
-	u32 actual_nr_sges_in_sq_wqe;  /* 60 */
-	u32 actual_nr_sges_in_rq_wqe;  /* 61 */
-	u32 qp_enable;                 /* 62 */
-	u32 curr_srq_limit;            /* 63 */
-	u64 qp_aff_asyn_ev_log_reg;    /* 64 */
-	u64 shared_rq_hndl;            /* 66 */
-	u64 trigg_doorbell_qp_hndl;    /* 68 */
-	u32 reserved_70_127[58];       /* 70 */
-};
-
-#define MQPCB_MASK_QKEY                         EHCA_BMASK_IBM( 0,  0)
-#define MQPCB_MASK_SEND_PSN                     EHCA_BMASK_IBM( 2,  2)
-#define MQPCB_MASK_RECEIVE_PSN                  EHCA_BMASK_IBM( 3,  3)
-#define MQPCB_MASK_PRIM_PHYS_PORT               EHCA_BMASK_IBM( 4,  4)
-#define MQPCB_PRIM_PHYS_PORT                    EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_PHYS_PORT                EHCA_BMASK_IBM( 5,  5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX               EHCA_BMASK_IBM( 6,  6)
-#define MQPCB_PRIM_P_KEY_IDX                    EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM( 7,  7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM( 8,  8)
-#define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM( 9,  9)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11, 11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12, 12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13, 13)
-#define MQPCB_MASK_DEST_QP_NR                   EHCA_BMASK_IBM(14, 14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD      EHCA_BMASK_IBM(15, 15)
-#define MQPCB_MASK_SERVICE_LEVEL                EHCA_BMASK_IBM(16, 16)
-#define MQPCB_MASK_SEND_GRH_FLAG                EHCA_BMASK_IBM(17, 17)
-#define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18, 18)
-#define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19, 19)
-#define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20, 20)
-#define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22, 22)
-#define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23, 23)
-#define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24, 24)
-#define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25, 25)
-#define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26, 26)
-#define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27, 27)
-#define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28, 28)
-#define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30, 30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31, 31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32, 32)
-#define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33, 33)
-#define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34, 34)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36, 36)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37, 37)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38, 38)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39, 39)
-#define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40, 40)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41, 41)
-#define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42, 42)
-#define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44, 44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47, 47)
-#define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48, 48)
-#define MQPCB_MASK_CURR_SRQ_LIMIT               EHCA_BMASK_IBM(49, 49)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50, 50)
-#define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51, 51)
-
-#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
deleted file mode 100644
index 1aa7931..0000000
--- a/drivers/staging/rdma/ehca/ehca_cq.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Completion queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *cq_cache;
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
-{
-	unsigned int qp_num = qp->real_qp_num;
-	unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->spinlock, flags);
-	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
-	spin_unlock_irqrestore(&cq->spinlock, flags);
-
-	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
-		 cq->cq_number, qp_num);
-
-	return 0;
-}
-
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
-{
-	int ret = -EINVAL;
-	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-	struct hlist_node *iter;
-	struct ehca_qp *qp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->spinlock, flags);
-	hlist_for_each(iter, &cq->qp_hashtab[key]) {
-		qp = hlist_entry(iter, struct ehca_qp, list_entries);
-		if (qp->real_qp_num == real_qp_num) {
-			hlist_del(iter);
-			ehca_dbg(cq->ib_cq.device,
-				 "removed qp from cq .cq_num=%x real_qp_num=%x",
-				 cq->cq_number, real_qp_num);
-			ret = 0;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&cq->spinlock, flags);
-	if (ret)
-		ehca_err(cq->ib_cq.device,
-			 "qp not found cq_num=%x real_qp_num=%x",
-			 cq->cq_number, real_qp_num);
-
-	return ret;
-}
-
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
-{
-	struct ehca_qp *ret = NULL;
-	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-	struct hlist_node *iter;
-	struct ehca_qp *qp;
-	hlist_for_each(iter, &cq->qp_hashtab[key]) {
-		qp = hlist_entry(iter, struct ehca_qp, list_entries);
-		if (qp->real_qp_num == real_qp_num) {
-			ret = qp;
-			break;
-		}
-	}
-	return ret;
-}
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
-			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *context,
-			     struct ib_udata *udata)
-{
-	int cqe = attr->cqe;
-	static const u32 additional_cqe = 20;
-	struct ib_cq *cq;
-	struct ehca_cq *my_cq;
-	struct ehca_shca *shca =
-		container_of(device, struct ehca_shca, ib_device);
-	struct ipz_adapter_handle adapter_handle;
-	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
-	struct h_galpa gal;
-	void *vpage;
-	u32 counter;
-	u64 rpage, cqx_fec, h_ret;
-	int rc, i;
-	unsigned long flags;
-
-	if (attr->flags)
-		return ERR_PTR(-EINVAL);
-
-	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
-		return ERR_PTR(-EINVAL);
-
-	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
-		ehca_err(device, "Unable to create CQ, max number of %i "
-			"CQs reached.", shca->max_num_cqs);
-		ehca_err(device, "To increase the maximum number of CQs "
-			"use the number_of_cqs module parameter.\n");
-		return ERR_PTR(-ENOSPC);
-	}
-
-	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
-	if (!my_cq) {
-		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
-			 device);
-		atomic_dec(&shca->num_cqs);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
-
-	spin_lock_init(&my_cq->spinlock);
-	spin_lock_init(&my_cq->cb_lock);
-	spin_lock_init(&my_cq->task_lock);
-	atomic_set(&my_cq->nr_events, 0);
-	init_waitqueue_head(&my_cq->wait_completion);
-
-	cq = &my_cq->ib_cq;
-
-	adapter_handle = shca->ipz_hca_handle;
-	param.eq_handle = shca->eq.ipz_eq_handle;
-
-	idr_preload(GFP_KERNEL);
-	write_lock_irqsave(&ehca_cq_idr_lock, flags);
-	rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
-	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-	idr_preload_end();
-
-	if (rc < 0) {
-		cq = ERR_PTR(-ENOMEM);
-		ehca_err(device, "Can't allocate new idr entry. device=%p",
-			 device);
-		goto create_cq_exit1;
-	}
-	my_cq->token = rc;
-
-	/*
-	 * The CQ's maximum depth is 4GB-64, but we need an additional 20 as a
-	 * buffer for receiving error CQEs.
-	 */
-	param.nr_cqe = cqe + additional_cqe;
-	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
-
-	if (h_ret != H_SUCCESS) {
-		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
-			 "h_ret=%lli device=%p", h_ret, device);
-		cq = ERR_PTR(ehca2ib_return_code(h_ret));
-		goto create_cq_exit2;
-	}
-
-	rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
-				EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
-	if (!rc) {
-		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
-			 rc, device);
-		cq = ERR_PTR(-EINVAL);
-		goto create_cq_exit3;
-	}
-
-	for (counter = 0; counter < param.act_pages; counter++) {
-		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
-		if (!vpage) {
-			ehca_err(device, "ipz_qpageit_get_inc() "
-				 "returns NULL device=%p", device);
-			cq = ERR_PTR(-EAGAIN);
-			goto create_cq_exit4;
-		}
-		rpage = __pa(vpage);
-
-		h_ret = hipz_h_register_rpage_cq(adapter_handle,
-						 my_cq->ipz_cq_handle,
-						 &my_cq->pf,
-						 0,
-						 0,
-						 rpage,
-						 1,
-						 my_cq->galpas.
-						 kernel);
-
-		if (h_ret < H_SUCCESS) {
-			ehca_err(device, "hipz_h_register_rpage_cq() failed "
-				 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
-				 "act_pages=%i", my_cq, my_cq->cq_number,
-				 h_ret, counter, param.act_pages);
-			cq = ERR_PTR(-EINVAL);
-			goto create_cq_exit4;
-		}
-
-		if (counter == (param.act_pages - 1)) {
-			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
-			if ((h_ret != H_SUCCESS) || vpage) {
-				ehca_err(device, "Registration of pages not "
-					 "complete ehca_cq=%p cq_num=%x "
-					 "h_ret=%lli", my_cq, my_cq->cq_number,
-					 h_ret);
-				cq = ERR_PTR(-EAGAIN);
-				goto create_cq_exit4;
-			}
-		} else {
-			if (h_ret != H_PAGE_REGISTERED) {
-				ehca_err(device, "Registration of page failed "
-					 "ehca_cq=%p cq_num=%x h_ret=%lli "
-					 "counter=%i act_pages=%i",
-					 my_cq, my_cq->cq_number,
-					 h_ret, counter, param.act_pages);
-				cq = ERR_PTR(-ENOMEM);
-				goto create_cq_exit4;
-			}
-		}
-	}
-
-	ipz_qeit_reset(&my_cq->ipz_queue);
-
-	gal = my_cq->galpas.kernel;
-	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
-	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
-		 my_cq, my_cq->cq_number, cqx_fec);
-
-	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
-		param.act_nr_of_entries - additional_cqe;
-	my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
-
-	for (i = 0; i < QP_HASHTAB_LEN; i++)
-		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
-
-	INIT_LIST_HEAD(&my_cq->sqp_err_list);
-	INIT_LIST_HEAD(&my_cq->rqp_err_list);
-
-	if (context) {
-		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
-		struct ehca_create_cq_resp resp;
-		memset(&resp, 0, sizeof(resp));
-		resp.cq_number = my_cq->cq_number;
-		resp.token = my_cq->token;
-		resp.ipz_queue.qe_size = ipz_queue->qe_size;
-		resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
-		resp.ipz_queue.queue_length = ipz_queue->queue_length;
-		resp.ipz_queue.pagesize = ipz_queue->pagesize;
-		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
-		resp.fw_handle_ofs = (u32)
-			(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
-		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-			ehca_err(device, "Copy to udata failed.");
-			cq = ERR_PTR(-EFAULT);
-			goto create_cq_exit4;
-		}
-	}
-
-	return cq;
-
-create_cq_exit4:
-	ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-
-create_cq_exit3:
-	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
-	if (h_ret != H_SUCCESS)
-		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
-			 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
-
-create_cq_exit2:
-	write_lock_irqsave(&ehca_cq_idr_lock, flags);
-	idr_remove(&ehca_cq_idr, my_cq->token);
-	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-create_cq_exit1:
-	kmem_cache_free(cq_cache, my_cq);
-
-	atomic_dec(&shca->num_cqs);
-	return cq;
-}
-
-int ehca_destroy_cq(struct ib_cq *cq)
-{
-	u64 h_ret;
-	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	int cq_num = my_cq->cq_number;
-	struct ib_device *device = cq->device;
-	struct ehca_shca *shca = container_of(device, struct ehca_shca,
-					      ib_device);
-	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-	unsigned long flags;
-
-	if (cq->uobject) {
-		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
-			ehca_err(device, "Resources still referenced in "
-				 "user space cq_num=%x", my_cq->cq_number);
-			return -EINVAL;
-		}
-	}
-
-	/*
-	 * remove the CQ from the idr first to make sure
-	 * no more interrupt tasklets will touch this CQ
-	 */
-	write_lock_irqsave(&ehca_cq_idr_lock, flags);
-	idr_remove(&ehca_cq_idr, my_cq->token);
-	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-	/* now wait until all pending events have completed */
-	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
-
-	/* nobody's using our CQ any longer -- we can destroy it */
-	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
-	if (h_ret == H_R_STATE) {
-		/* cq in err: read err data and destroy it forcibly */
-		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
-			 "state. Try to delete it forcibly.",
-			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
-		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
-		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
-		if (h_ret == H_SUCCESS)
-			ehca_dbg(device, "cq_num=%x deleted successfully.",
-				 cq_num);
-	}
-	if (h_ret != H_SUCCESS) {
-		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
-			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
-		return ehca2ib_return_code(h_ret);
-	}
-	ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-	kmem_cache_free(cq_cache, my_cq);
-
-	atomic_dec(&shca->num_cqs);
-	return 0;
-}
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
-	/* TODO: proper resize needs to be done */
-	ehca_err(cq->device, "not implemented yet");
-
-	return -EFAULT;
-}
-
-int ehca_init_cq_cache(void)
-{
-	cq_cache = kmem_cache_create("ehca_cache_cq",
-				     sizeof(struct ehca_cq), 0,
-				     SLAB_HWCACHE_ALIGN,
-				     NULL);
-	if (!cq_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void ehca_cleanup_cq_cache(void)
-{
-	kmem_cache_destroy(cq_cache);
-}
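ehca_create_cq() and ehca_destroy_cq() above hang every CQ off a global idr keyed by a token, and the teardown ordering is what keeps the interrupt path safe: the CQ is removed from the idr before anything is freed, and destroy then waits for nr_events to drain. A condensed sketch of that lifecycle; the irq-side lookup lives in ehca_irq.c, which is not part of this hunk, so the lookup shown here is an assumption about how the token is consumed:

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	struct cq_stub {
		u32 token;
		atomic_t nr_events;
		wait_queue_head_t wait_completion;
	};

	static DEFINE_IDR(cq_idr);
	static DEFINE_RWLOCK(cq_idr_lock);

	/* create path: publish the CQ under a freshly allocated token */
	static int publish_cq(struct cq_stub *cq)
	{
		unsigned long flags;
		int rc;

		idr_preload(GFP_KERNEL);
		write_lock_irqsave(&cq_idr_lock, flags);
		rc = idr_alloc(&cq_idr, cq, 0, 0x2000000, GFP_NOWAIT);
		write_unlock_irqrestore(&cq_idr_lock, flags);
		idr_preload_end();
		if (rc < 0)
			return rc;
		cq->token = rc;
		return 0;
	}

	/* irq path (assumed): resolve a token back to the CQ; the event handler
	 * later decrements nr_events and wakes wait_completion when it is done */
	static struct cq_stub *lookup_cq(u32 token)
	{
		struct cq_stub *cq;

		read_lock(&cq_idr_lock);
		cq = idr_find(&cq_idr, token);
		if (cq)
			atomic_inc(&cq->nr_events);
		read_unlock(&cq_idr_lock);
		return cq;
	}

	/* destroy path: unpublish first, then wait for in-flight events to drain */
	static void unpublish_cq(struct cq_stub *cq)
	{
		unsigned long flags;

		write_lock_irqsave(&cq_idr_lock, flags);
		idr_remove(&cq_idr, cq->token);
		write_unlock_irqrestore(&cq_idr_lock, flags);
		wait_event(cq->wait_completion, !atomic_read(&cq->nr_events));
	}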
diff --git a/drivers/staging/rdma/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
deleted file mode 100644
index 90da674..0000000
--- a/drivers/staging/rdma/ehca/ehca_eq.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Event queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_qes.h"
-#include "hcp_if.h"
-#include "ipz_pt_fn.h"
-
-int ehca_create_eq(struct ehca_shca *shca,
-		   struct ehca_eq *eq,
-		   const enum ehca_eq_type type, const u32 length)
-{
-	int ret;
-	u64 h_ret;
-	u32 nr_pages;
-	u32 i;
-	void *vpage;
-	struct ib_device *ib_dev = &shca->ib_device;
-
-	spin_lock_init(&eq->spinlock);
-	spin_lock_init(&eq->irq_spinlock);
-	eq->is_initialized = 0;
-
-	if (type != EHCA_EQ && type != EHCA_NEQ) {
-		ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
-		return -EINVAL;
-	}
-	if (!length) {
-		ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
-		return -EINVAL;
-	}
-
-	h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
-					 &eq->pf,
-					 type,
-					 length,
-					 &eq->ipz_eq_handle,
-					 &eq->length,
-					 &nr_pages, &eq->ist);
-
-	if (h_ret != H_SUCCESS) {
-		ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
-		return -EINVAL;
-	}
-
-	ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
-			     EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
-	if (!ret) {
-		ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
-		goto create_eq_exit1;
-	}
-
-	for (i = 0; i < nr_pages; i++) {
-		u64 rpage;
-
-		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-		if (!vpage)
-			goto create_eq_exit2;
-
-		rpage = __pa(vpage);
-		h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
-						 eq->ipz_eq_handle,
-						 &eq->pf,
-						 0, 0, rpage, 1);
-
-		if (i == (nr_pages - 1)) {
-			/* last page */
-			vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-			if (h_ret != H_SUCCESS || vpage)
-				goto create_eq_exit2;
-		} else {
-			if (h_ret != H_PAGE_REGISTERED)
-				goto create_eq_exit2;
-		}
-	}
-
-	ipz_qeit_reset(&eq->ipz_queue);
-
-	/* register interrupt handlers and initialize work queues */
-	if (type == EHCA_EQ) {
-		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
-
-		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
-					  0, "ehca_eq",
-					  (void *)shca);
-		if (ret < 0)
-			ehca_err(ib_dev, "Can't map interrupt handler.");
-	} else if (type == EHCA_NEQ) {
-		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
-
-		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
-					  0, "ehca_neq",
-					  (void *)shca);
-		if (ret < 0)
-			ehca_err(ib_dev, "Can't map interrupt handler.");
-	}
-
-	eq->is_initialized = 1;
-
-	return 0;
-
-create_eq_exit2:
-	ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-create_eq_exit1:
-	hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
-	return -EINVAL;
-}
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
-	unsigned long flags;
-	void *eqe;
-
-	spin_lock_irqsave(&eq->spinlock, flags);
-	eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
-	spin_unlock_irqrestore(&eq->spinlock, flags);
-
-	return eqe;
-}
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
-	unsigned long flags;
-	u64 h_ret;
-
-	ibmebus_free_irq(eq->ist, (void *)shca);
-
-	spin_lock_irqsave(&shca_list_lock, flags);
-	eq->is_initialized = 0;
-	spin_unlock_irqrestore(&shca_list_lock, flags);
-
-	tasklet_kill(&eq->interrupt_task);
-
-	h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't free EQ resources.");
-		return -EINVAL;
-	}
-	ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-	return 0;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
deleted file mode 100644
index e8b1bb6..0000000
--- a/drivers/staging/rdma/ehca/ehca_hca.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HCA query functions
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/gfp.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static unsigned int limit_uint(unsigned int value)
-{
-	return min_t(unsigned int, value, INT_MAX);
-}
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-		      struct ib_udata *uhw)
-{
-	int i, ret = 0;
-	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-					      ib_device);
-	struct hipz_query_hca *rblock;
-
-	static const u32 cap_mapping[] = {
-		IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
-		IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
-		IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
-		IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
-		IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
-		IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
-		IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
-		IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
-		IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
-		IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
-		IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
-	};
-
-	if (uhw->inlen || uhw->outlen)
-		return -EINVAL;
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!rblock) {
-		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-		return -ENOMEM;
-	}
-
-	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't query device properties");
-		ret = -EINVAL;
-		goto query_device1;
-	}
-
-	memset(props, 0, sizeof(struct ib_device_attr));
-	props->page_size_cap   = shca->hca_cap_mr_pgsize;
-	props->fw_ver          = rblock->hw_ver;
-	props->max_mr_size     = rblock->max_mr_size;
-	props->vendor_id       = rblock->vendor_id >> 8;
-	props->vendor_part_id  = rblock->vendor_part_id >> 16;
-	props->hw_ver          = rblock->hw_ver;
-	props->max_qp          = limit_uint(rblock->max_qp);
-	props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
-	props->max_sge         = limit_uint(rblock->max_sge);
-	props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
-	props->max_cq          = limit_uint(rblock->max_cq);
-	props->max_cqe         = limit_uint(rblock->max_cqe);
-	props->max_mr          = limit_uint(rblock->max_mr);
-	props->max_mw          = limit_uint(rblock->max_mw);
-	props->max_pd          = limit_uint(rblock->max_pd);
-	props->max_ah          = limit_uint(rblock->max_ah);
-	props->max_ee          = limit_uint(rblock->max_rd_ee_context);
-	props->max_rdd         = limit_uint(rblock->max_rd_domain);
-	props->max_fmr         = limit_uint(rblock->max_mr);
-	props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
-	props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
-	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
-	props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
-	props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
-
-	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-		props->max_srq         = limit_uint(props->max_qp);
-		props->max_srq_wr      = limit_uint(props->max_qp_wr);
-		props->max_srq_sge     = 3;
-	}
-
-	props->max_pkeys           = 16;
-	/* Some FW versions say 0 here; insert sensible value in that case */
-	props->local_ca_ack_delay  = rblock->local_ca_ack_delay ?
-		min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
-	props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
-	props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
-	props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
-	props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
-	props->max_total_mcast_qp_attach
-		= limit_uint(rblock->max_total_mcast_qp_attach);
-
-	/* translate device capabilities */
-	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
-		IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
-	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
-		if (rblock->hca_cap_indicators & cap_mapping[i + 1])
-			props->device_cap_flags |= cap_mapping[i];
-
-query_device1:
-	ehca_free_fw_ctrlblock(rblock);
-
-	return ret;
-}
-
-static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
-{
-	switch (fw_mtu) {
-	case 0x1:
-		return IB_MTU_256;
-	case 0x2:
-		return IB_MTU_512;
-	case 0x3:
-		return IB_MTU_1024;
-	case 0x4:
-		return IB_MTU_2048;
-	case 0x5:
-		return IB_MTU_4096;
-	default:
-		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
-			 fw_mtu);
-		return 0;
-	}
-}
-
-static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
-{
-	switch (vl_cap) {
-	case 0x1:
-		return 1;
-	case 0x2:
-		return 2;
-	case 0x3:
-		return 4;
-	case 0x4:
-		return 8;
-	case 0x5:
-		return 15;
-	default:
-		ehca_err(&shca->ib_device, "Invalid VL capability: %x.",
-			 vl_cap);
-		return 0;
-	}
-}
-
-int ehca_query_port(struct ib_device *ibdev,
-		    u8 port, struct ib_port_attr *props)
-{
-	int ret = 0;
-	u64 h_ret;
-	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-					      ib_device);
-	struct hipz_query_port *rblock;
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!rblock) {
-		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't query port properties");
-		ret = -EINVAL;
-		goto query_port1;
-	}
-
-	memset(props, 0, sizeof(struct ib_port_attr));
-
-	props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
-	props->port_cap_flags  = rblock->capability_mask;
-	props->gid_tbl_len     = rblock->gid_tbl_len;
-	if (rblock->max_msg_sz)
-		props->max_msg_sz      = rblock->max_msg_sz;
-	else
-		props->max_msg_sz      = 0x1 << 31;
-	props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
-	props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
-	props->pkey_tbl_len    = rblock->pkey_tbl_len;
-	props->lid             = rblock->lid;
-	props->sm_lid          = rblock->sm_lid;
-	props->lmc             = rblock->lmc;
-	props->sm_sl           = rblock->sm_sl;
-	props->subnet_timeout  = rblock->subnet_timeout;
-	props->init_type_reply = rblock->init_type_reply;
-	props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);
-
-	if (rblock->state && rblock->phys_width) {
-		props->phys_state      = rblock->phys_pstate;
-		props->state           = rblock->phys_state;
-		props->active_width    = rblock->phys_width;
-		props->active_speed    = rblock->phys_speed;
-	} else {
-		/* old firmware releases don't report physical
-		 * port info, so use default values
-		 */
-		props->phys_state      = 5;
-		props->state           = rblock->state;
-		props->active_width    = IB_WIDTH_12X;
-		props->active_speed    = IB_SPEED_SDR;
-	}
-
-query_port1:
-	ehca_free_fw_ctrlblock(rblock);
-
-	return ret;
-}
-
-int ehca_query_sma_attr(struct ehca_shca *shca,
-			u8 port, struct ehca_sma_attr *attr)
-{
-	int ret = 0;
-	u64 h_ret;
-	struct hipz_query_port *rblock;
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-	if (!rblock) {
-		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't query port properties");
-		ret = -EINVAL;
-		goto query_sma_attr1;
-	}
-
-	memset(attr, 0, sizeof(struct ehca_sma_attr));
-
-	attr->lid    = rblock->lid;
-	attr->lmc    = rblock->lmc;
-	attr->sm_sl  = rblock->sm_sl;
-	attr->sm_lid = rblock->sm_lid;
-
-	attr->pkey_tbl_len = rblock->pkey_tbl_len;
-	memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
-
-query_sma_attr1:
-	ehca_free_fw_ctrlblock(rblock);
-
-	return ret;
-}
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
-{
-	int ret = 0;
-	u64 h_ret;
-	struct ehca_shca *shca;
-	struct hipz_query_port *rblock;
-
-	shca = container_of(ibdev, struct ehca_shca, ib_device);
-	if (index > 16) {
-		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
-		return -EINVAL;
-	}
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!rblock) {
-		ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't query port properties");
-		ret = -EINVAL;
-		goto query_pkey1;
-	}
-
-	memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
-
-query_pkey1:
-	ehca_free_fw_ctrlblock(rblock);
-
-	return ret;
-}
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port,
-		   int index, union ib_gid *gid)
-{
-	int ret = 0;
-	u64 h_ret;
-	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-					      ib_device);
-	struct hipz_query_port *rblock;
-
-	if (index < 0 || index > 255) {
-		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
-		return -EINVAL;
-	}
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!rblock) {
-		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't query port properties");
-		ret = -EINVAL;
-		goto query_gid1;
-	}
-
-	memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
-	memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
-
-query_gid1:
-	ehca_free_fw_ctrlblock(rblock);
-
-	return ret;
-}
-
-static const u32 allowed_port_caps = (
-	IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
-	IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
-	IB_PORT_VENDOR_CLASS_SUP);
-
-int ehca_modify_port(struct ib_device *ibdev,
-		     u8 port, int port_modify_mask,
-		     struct ib_port_modify *props)
-{
-	int ret = 0;
-	struct ehca_shca *shca;
-	struct hipz_query_port *rblock;
-	u32 cap;
-	u64 hret;
-
-	shca = container_of(ibdev, struct ehca_shca, ib_device);
-	if ((props->set_port_cap_mask | props->clr_port_cap_mask)
-	    & ~allowed_port_caps) {
-		ehca_err(&shca->ib_device, "Non-changeable bits set in masks  "
-			 "set=%x  clr=%x  allowed=%x", props->set_port_cap_mask,
-			 props->clr_port_cap_mask, allowed_port_caps);
-		return -EINVAL;
-	}
-
-	if (mutex_lock_interruptible(&shca->modify_mutex))
-		return -ERESTARTSYS;
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!rblock) {
-		ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
-		ret = -ENOMEM;
-		goto modify_port1;
-	}
-
-	hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-	if (hret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't query port properties");
-		ret = -EINVAL;
-		goto modify_port2;
-	}
-
-	cap = (rblock->capability_mask | props->set_port_cap_mask)
-		& ~props->clr_port_cap_mask;
-
-	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
-				  cap, props->init_type, port_modify_mask);
-	if (hret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Modify port failed  h_ret=%lli",
-			 hret);
-		ret = -EINVAL;
-	}
-
-modify_port2:
-	ehca_free_fw_ctrlblock(rblock);
-
-modify_port1:
-	mutex_unlock(&shca->modify_mutex);
-
-	return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
deleted file mode 100644
index 8615d7c..0000000
--- a/drivers/staging/rdma/ehca/ehca_irq.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Functions for EQs, NEQs and interrupts
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <linux/smpboot.h>
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
-#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
-#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
-#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
-#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)
-
-#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
-#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
-#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
-#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
-#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)
-
-#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
-#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
-
-static void queue_comp_task(struct ehca_cq *__cq);
-
-static struct ehca_comp_pool *pool;
-
-static inline void comp_event_callback(struct ehca_cq *cq)
-{
-	if (!cq->ib_cq.comp_handler)
-		return;
-
-	spin_lock(&cq->cb_lock);
-	cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
-	spin_unlock(&cq->cb_lock);
-
-	return;
-}
-
-static void print_error_data(struct ehca_shca *shca, void *data,
-			     u64 *rblock, int length)
-{
-	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
-	u64 resource = rblock[1];
-
-	switch (type) {
-	case 0x1: /* Queue Pair */
-	{
-		struct ehca_qp *qp = (struct ehca_qp *)data;
-
-		/* only print error data if AER is set */
-		if (rblock[6] == 0)
-			return;
-
-		ehca_err(&shca->ib_device,
-			 "QP 0x%x (resource=%llx) has errors.",
-			 qp->ib_qp.qp_num, resource);
-		break;
-	}
-	case 0x4: /* Completion Queue */
-	{
-		struct ehca_cq *cq = (struct ehca_cq *)data;
-
-		ehca_err(&shca->ib_device,
-			 "CQ 0x%x (resource=%llx) has errors.",
-			 cq->cq_number, resource);
-		break;
-	}
-	default:
-		ehca_err(&shca->ib_device,
-			 "Unknown error type: %llx on %s.",
-			 type, shca->ib_device.name);
-		break;
-	}
-
-	ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
-	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
-		 "---------------------------------------------------");
-	ehca_dmp(rblock, length, "resource=%llx", resource);
-	ehca_err(&shca->ib_device, "EHCA ----- error data end "
-		 "----------------------------------------------------");
-
-	return;
-}
-
-int ehca_error_data(struct ehca_shca *shca, void *data,
-		    u64 resource)
-{
-
-	unsigned long ret;
-	u64 *rblock;
-	unsigned long block_count;
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-	if (!rblock) {
-		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
-		ret = -ENOMEM;
-		goto error_data1;
-	}
-
-	/* rblock must be 4K aligned and should be 4K large */
-	ret = hipz_h_error_data(shca->ipz_hca_handle,
-				resource,
-				rblock,
-				&block_count);
-
-	if (ret == H_R_STATE)
-		ehca_err(&shca->ib_device,
-			 "No error data is available: %llx.", resource);
-	else if (ret == H_SUCCESS) {
-		int length;
-
-		length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
-
-		if (length > EHCA_PAGESIZE)
-			length = EHCA_PAGESIZE;
-
-		print_error_data(shca, data, rblock, length);
-	} else
-		ehca_err(&shca->ib_device,
-			 "Error data could not be fetched: %llx", resource);
-
-	ehca_free_fw_ctrlblock(rblock);
-
-error_data1:
-	return ret;
-
-}
-
-static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
-			      enum ib_event_type event_type)
-{
-	struct ib_event event;
-
-	/* PATH_MIG without the QP ever having been armed is false alarm */
-	if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
-		return;
-
-	event.device = &shca->ib_device;
-	event.event = event_type;
-
-	if (qp->ext_type == EQPT_SRQ) {
-		if (!qp->ib_srq.event_handler)
-			return;
-
-		event.element.srq = &qp->ib_srq;
-		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
-	} else {
-		if (!qp->ib_qp.event_handler)
-			return;
-
-		event.element.qp = &qp->ib_qp;
-		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
-	}
-}
-
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
-			      enum ib_event_type event_type, int fatal)
-{
-	struct ehca_qp *qp;
-	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
-	read_lock(&ehca_qp_idr_lock);
-	qp = idr_find(&ehca_qp_idr, token);
-	if (qp)
-		atomic_inc(&qp->nr_events);
-	read_unlock(&ehca_qp_idr_lock);
-
-	if (!qp)
-		return;
-
-	if (fatal)
-		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
-
-	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
-			  IB_EVENT_SRQ_ERR : event_type);
-
-	/*
-	 * eHCA only processes one WQE at a time for SRQ base QPs,
-	 * so the last WQE has been processed as soon as the QP enters
-	 * error state.
-	 */
-	if (fatal && qp->ext_type == EQPT_SRQBASE)
-		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
-
-	if (atomic_dec_and_test(&qp->nr_events))
-		wake_up(&qp->wait_completion);
-	return;
-}
-
-static void cq_event_callback(struct ehca_shca *shca,
-			      u64 eqe)
-{
-	struct ehca_cq *cq;
-	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
-
-	read_lock(&ehca_cq_idr_lock);
-	cq = idr_find(&ehca_cq_idr, token);
-	if (cq)
-		atomic_inc(&cq->nr_events);
-	read_unlock(&ehca_cq_idr_lock);
-
-	if (!cq)
-		return;
-
-	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
-
-	if (atomic_dec_and_test(&cq->nr_events))
-		wake_up(&cq->wait_completion);
-
-	return;
-}
-
-static void parse_identifier(struct ehca_shca *shca, u64 eqe)
-{
-	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
-
-	switch (identifier) {
-	case 0x02: /* path migrated */
-		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
-		break;
-	case 0x03: /* communication established */
-		qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
-		break;
-	case 0x04: /* send queue drained */
-		qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
-		break;
-	case 0x05: /* QP error */
-	case 0x06: /* QP error */
-		qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
-		break;
-	case 0x07: /* CQ error */
-	case 0x08: /* CQ error */
-		cq_event_callback(shca, eqe);
-		break;
-	case 0x09: /* MRMWPTE error */
-		ehca_err(&shca->ib_device, "MRMWPTE error.");
-		break;
-	case 0x0A: /* port event */
-		ehca_err(&shca->ib_device, "Port event.");
-		break;
-	case 0x0B: /* MR access error */
-		ehca_err(&shca->ib_device, "MR access error.");
-		break;
-	case 0x0C: /* EQ error */
-		ehca_err(&shca->ib_device, "EQ error.");
-		break;
-	case 0x0D: /* P/Q_Key mismatch */
-		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
-		break;
-	case 0x10: /* sampling complete */
-		ehca_err(&shca->ib_device, "Sampling complete.");
-		break;
-	case 0x11: /* unaffiliated access error */
-		ehca_err(&shca->ib_device, "Unaffiliated access error.");
-		break;
-	case 0x12: /* path migrating */
-		ehca_err(&shca->ib_device, "Path migrating.");
-		break;
-	case 0x13: /* interface trace stopped */
-		ehca_err(&shca->ib_device, "Interface trace stopped.");
-		break;
-	case 0x14: /* first error capture info available */
-		ehca_info(&shca->ib_device, "First error capture available");
-		break;
-	case 0x15: /* SRQ limit reached */
-		qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
-		break;
-	default:
-		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
-			 identifier, shca->ib_device.name);
-		break;
-	}
-
-	return;
-}
-
-static void dispatch_port_event(struct ehca_shca *shca, int port_num,
-				enum ib_event_type type, const char *msg)
-{
-	struct ib_event event;
-
-	ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
-	event.device = &shca->ib_device;
-	event.event = type;
-	event.element.port_num = port_num;
-	ib_dispatch_event(&event);
-}
-
-static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
-{
-	struct ehca_sma_attr  new_attr;
-	struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
-
-	ehca_query_sma_attr(shca, port_num, &new_attr);
-
-	if (new_attr.sm_sl  != old_attr->sm_sl ||
-	    new_attr.sm_lid != old_attr->sm_lid)
-		dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
-				    "SM changed");
-
-	if (new_attr.lid != old_attr->lid ||
-	    new_attr.lmc != old_attr->lmc)
-		dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
-				    "LID changed");
-
-	if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
-	    memcmp(new_attr.pkeys, old_attr->pkeys,
-		   sizeof(u16) * new_attr.pkey_tbl_len))
-		dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
-				    "P_Key changed");
-
-	*old_attr = new_attr;
-}
-
-/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
-static int replay_modify_qp(struct ehca_sport *sport)
-{
-	int aqp1_destroyed;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-
-	aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
-
-	if (sport->ibqp_sqp[IB_QPT_SMI])
-		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-	if (!aqp1_destroyed)
-		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-
-	spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-	return aqp1_destroyed;
-}
-
-static void parse_ec(struct ehca_shca *shca, u64 eqe)
-{
-	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
-	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
-	u8 spec_event;
-	struct ehca_sport *sport = &shca->sport[port - 1];
-
-	switch (ec) {
-	case 0x30: /* port availability change */
-		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-			/* only replay modify_qp calls in autodetect mode;
-			 * if AQP1 was destroyed, the port is already down
-			 * again and we can drop the event.
-			 */
-			if (ehca_nr_ports < 0)
-				if (replay_modify_qp(sport))
-					break;
-
-			sport->port_state = IB_PORT_ACTIVE;
-			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
-					    "is active");
-			ehca_query_sma_attr(shca, port, &sport->saved_attr);
-		} else {
-			sport->port_state = IB_PORT_DOWN;
-			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
-					    "is inactive");
-		}
-		break;
-	case 0x31:
-		/* port configuration change
-		 * disruptive change is caused by
-		 * LID, PKEY or SM change
-		 */
-		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
-			ehca_warn(&shca->ib_device, "disruptive port "
-				  "%d configuration change", port);
-
-			sport->port_state = IB_PORT_DOWN;
-			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
-					    "is inactive");
-
-			sport->port_state = IB_PORT_ACTIVE;
-			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
-					    "is active");
-			ehca_query_sma_attr(shca, port,
-					    &sport->saved_attr);
-		} else
-			notify_port_conf_change(shca, port);
-		break;
-	case 0x32: /* adapter malfunction */
-		ehca_err(&shca->ib_device, "Adapter malfunction.");
-		break;
-	case 0x33:  /* trace stopped */
-		ehca_err(&shca->ib_device, "Trace stopped.");
-		break;
-	case 0x34: /* util async event */
-		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
-		if (spec_event == 0x80) /* client reregister required */
-			dispatch_port_event(shca, port,
-					    IB_EVENT_CLIENT_REREGISTER,
-					    "client reregister req.");
-		else
-			ehca_warn(&shca->ib_device, "Unknown util async "
-				  "event %x on port %x", spec_event, port);
-		break;
-	default:
-		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
-			 ec, shca->ib_device.name);
-		break;
-	}
-
-	return;
-}
-
-static inline void reset_eq_pending(struct ehca_cq *cq)
-{
-	u64 CQx_EP;
-	struct h_galpa gal = cq->galpas.kernel;
-
-	hipz_galpa_store_cq(gal, cqx_ep, 0x0);
-	CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
-
-	return;
-}
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
-{
-	struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
-	tasklet_hi_schedule(&shca->neq.interrupt_task);
-
-	return IRQ_HANDLED;
-}
-
-void ehca_tasklet_neq(unsigned long data)
-{
-	struct ehca_shca *shca = (struct ehca_shca*)data;
-	struct ehca_eqe *eqe;
-	u64 ret;
-
-	eqe = ehca_poll_eq(shca, &shca->neq);
-
-	while (eqe) {
-		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
-			parse_ec(shca, eqe->entry);
-
-		eqe = ehca_poll_eq(shca, &shca->neq);
-	}
-
-	ret = hipz_h_reset_event(shca->ipz_hca_handle,
-				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
-
-	if (ret != H_SUCCESS)
-		ehca_err(&shca->ib_device, "Can't clear notification events.");
-
-	return;
-}
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
-{
-	struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
-	tasklet_hi_schedule(&shca->eq.interrupt_task);
-
-	return IRQ_HANDLED;
-}
-
-
-static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
-{
-	u64 eqe_value;
-	u32 token;
-	struct ehca_cq *cq;
-
-	eqe_value = eqe->entry;
-	ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
-	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-		ehca_dbg(&shca->ib_device, "Got completion event");
-		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-		read_lock(&ehca_cq_idr_lock);
-		cq = idr_find(&ehca_cq_idr, token);
-		if (cq)
-			atomic_inc(&cq->nr_events);
-		read_unlock(&ehca_cq_idr_lock);
-		if (cq == NULL) {
-			ehca_err(&shca->ib_device,
-				 "Invalid eqe for non-existing cq token=%x",
-				 token);
-			return;
-		}
-		reset_eq_pending(cq);
-		if (ehca_scaling_code)
-			queue_comp_task(cq);
-		else {
-			comp_event_callback(cq);
-			if (atomic_dec_and_test(&cq->nr_events))
-				wake_up(&cq->wait_completion);
-		}
-	} else {
-		ehca_dbg(&shca->ib_device, "Got non completion event");
-		parse_identifier(shca, eqe_value);
-	}
-}
-
-void ehca_process_eq(struct ehca_shca *shca, int is_irq)
-{
-	struct ehca_eq *eq = &shca->eq;
-	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
-	u64 eqe_value, ret;
-	int eqe_cnt, i;
-	int eq_empty = 0;
-
-	spin_lock(&eq->irq_spinlock);
-	if (is_irq) {
-		const int max_query_cnt = 100;
-		int query_cnt = 0;
-		int int_state = 1;
-		do {
-			int_state = hipz_h_query_int_state(
-				shca->ipz_hca_handle, eq->ist);
-			query_cnt++;
-			iosync();
-		} while (int_state && query_cnt < max_query_cnt);
-		if (unlikely((query_cnt == max_query_cnt)))
-			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
-				 int_state, query_cnt);
-	}
-
-	/* read out all eqes */
-	eqe_cnt = 0;
-	do {
-		u32 token;
-		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
-		if (!eqe_cache[eqe_cnt].eqe)
-			break;
-		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
-		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-			read_lock(&ehca_cq_idr_lock);
-			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
-			if (eqe_cache[eqe_cnt].cq)
-				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
-			read_unlock(&ehca_cq_idr_lock);
-			if (!eqe_cache[eqe_cnt].cq) {
-				ehca_err(&shca->ib_device,
-					 "Invalid eqe for non-existing cq "
-					 "token=%x", token);
-				continue;
-			}
-		} else
-			eqe_cache[eqe_cnt].cq = NULL;
-		eqe_cnt++;
-	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
-	if (!eqe_cnt) {
-		if (is_irq)
-			ehca_dbg(&shca->ib_device,
-				 "No eqe found for irq event");
-		goto unlock_irq_spinlock;
-	} else if (!is_irq) {
-		ret = hipz_h_eoi(eq->ist);
-		if (ret != H_SUCCESS)
-			ehca_err(&shca->ib_device,
-				 "bad return code EOI -rc = %lld\n", ret);
-		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
-	}
-	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
-		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
-	/* enable irq for new packets */
-	for (i = 0; i < eqe_cnt; i++) {
-		if (eq->eqe_cache[i].cq)
-			reset_eq_pending(eq->eqe_cache[i].cq);
-	}
-	/* check eq */
-	spin_lock(&eq->spinlock);
-	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
-	spin_unlock(&eq->spinlock);
-	/* call completion handler for cached eqes */
-	for (i = 0; i < eqe_cnt; i++)
-		if (eq->eqe_cache[i].cq) {
-			if (ehca_scaling_code)
-				queue_comp_task(eq->eqe_cache[i].cq);
-			else {
-				struct ehca_cq *cq = eq->eqe_cache[i].cq;
-				comp_event_callback(cq);
-				if (atomic_dec_and_test(&cq->nr_events))
-					wake_up(&cq->wait_completion);
-			}
-		} else {
-			ehca_dbg(&shca->ib_device, "Got non completion event");
-			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
-		}
-	/* poll eq if not empty */
-	if (eq_empty)
-		goto unlock_irq_spinlock;
-	do {
-		struct ehca_eqe *eqe;
-		eqe = ehca_poll_eq(shca, &shca->eq);
-		if (!eqe)
-			break;
-		process_eqe(shca, eqe);
-	} while (1);
-
-unlock_irq_spinlock:
-	spin_unlock(&eq->irq_spinlock);
-}
-
-void ehca_tasklet_eq(unsigned long data)
-{
-	ehca_process_eq((struct ehca_shca*)data, 1);
-}
-
-static int find_next_online_cpu(struct ehca_comp_pool *pool)
-{
-	int cpu;
-	unsigned long flags;
-
-	WARN_ON_ONCE(!in_interrupt());
-	if (ehca_debug_level >= 3)
-		ehca_dmp(cpu_online_mask, cpumask_size(), "");
-
-	spin_lock_irqsave(&pool->last_cpu_lock, flags);
-	do {
-		cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
-		if (cpu >= nr_cpu_ids)
-			cpu = cpumask_first(cpu_online_mask);
-		pool->last_cpu = cpu;
-	} while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
-	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
-
-	return cpu;
-}
-
-static void __queue_comp_task(struct ehca_cq *__cq,
-			      struct ehca_cpu_comp_task *cct,
-			      struct task_struct *thread)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cct->task_lock, flags);
-	spin_lock(&__cq->task_lock);
-
-	if (__cq->nr_callbacks == 0) {
-		__cq->nr_callbacks++;
-		list_add_tail(&__cq->entry, &cct->cq_list);
-		cct->cq_jobs++;
-		wake_up_process(thread);
-	} else
-		__cq->nr_callbacks++;
-
-	spin_unlock(&__cq->task_lock);
-	spin_unlock_irqrestore(&cct->task_lock, flags);
-}
-
-static void queue_comp_task(struct ehca_cq *__cq)
-{
-	int cpu_id;
-	struct ehca_cpu_comp_task *cct;
-	struct task_struct *thread;
-	int cq_jobs;
-	unsigned long flags;
-
-	cpu_id = find_next_online_cpu(pool);
-	BUG_ON(!cpu_online(cpu_id));
-
-	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
-	thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
-	BUG_ON(!cct || !thread);
-
-	spin_lock_irqsave(&cct->task_lock, flags);
-	cq_jobs = cct->cq_jobs;
-	spin_unlock_irqrestore(&cct->task_lock, flags);
-	if (cq_jobs > 0) {
-		cpu_id = find_next_online_cpu(pool);
-		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
-		thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
-		BUG_ON(!cct || !thread);
-	}
-	__queue_comp_task(__cq, cct, thread);
-}
-
-static void run_comp_task(struct ehca_cpu_comp_task *cct)
-{
-	struct ehca_cq *cq;
-
-	while (!list_empty(&cct->cq_list)) {
-		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
-		spin_unlock_irq(&cct->task_lock);
-
-		comp_event_callback(cq);
-		if (atomic_dec_and_test(&cq->nr_events))
-			wake_up(&cq->wait_completion);
-
-		spin_lock_irq(&cct->task_lock);
-		spin_lock(&cq->task_lock);
-		cq->nr_callbacks--;
-		if (!cq->nr_callbacks) {
-			list_del_init(cct->cq_list.next);
-			cct->cq_jobs--;
-		}
-		spin_unlock(&cq->task_lock);
-	}
-}
-
-static void comp_task_park(unsigned int cpu)
-{
-	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-	struct ehca_cpu_comp_task *target;
-	struct task_struct *thread;
-	struct ehca_cq *cq, *tmp;
-	LIST_HEAD(list);
-
-	spin_lock_irq(&cct->task_lock);
-	cct->cq_jobs = 0;
-	cct->active = 0;
-	list_splice_init(&cct->cq_list, &list);
-	spin_unlock_irq(&cct->task_lock);
-
-	cpu = find_next_online_cpu(pool);
-	target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-	thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
-	spin_lock_irq(&target->task_lock);
-	list_for_each_entry_safe(cq, tmp, &list, entry) {
-		list_del(&cq->entry);
-		__queue_comp_task(cq, target, thread);
-	}
-	spin_unlock_irq(&target->task_lock);
-}
-
-static void comp_task_stop(unsigned int cpu, bool online)
-{
-	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
-	spin_lock_irq(&cct->task_lock);
-	cct->cq_jobs = 0;
-	cct->active = 0;
-	WARN_ON(!list_empty(&cct->cq_list));
-	spin_unlock_irq(&cct->task_lock);
-}
-
-static int comp_task_should_run(unsigned int cpu)
-{
-	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
-	return cct->cq_jobs;
-}
-
-static void comp_task(unsigned int cpu)
-{
-	struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
-	int cql_empty;
-
-	spin_lock_irq(&cct->task_lock);
-	cql_empty = list_empty(&cct->cq_list);
-	if (!cql_empty) {
-		__set_current_state(TASK_RUNNING);
-		run_comp_task(cct);
-	}
-	spin_unlock_irq(&cct->task_lock);
-}
-
-static struct smp_hotplug_thread comp_pool_threads = {
-	.thread_should_run	= comp_task_should_run,
-	.thread_fn		= comp_task,
-	.thread_comm		= "ehca_comp/%u",
-	.cleanup		= comp_task_stop,
-	.park			= comp_task_park,
-};
-
-int ehca_create_comp_pool(void)
-{
-	int cpu, ret = -ENOMEM;
-
-	if (!ehca_scaling_code)
-		return 0;
-
-	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
-	if (pool == NULL)
-		return -ENOMEM;
-
-	spin_lock_init(&pool->last_cpu_lock);
-	pool->last_cpu = cpumask_any(cpu_online_mask);
-
-	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
-	if (!pool->cpu_comp_tasks)
-		goto out_pool;
-
-	pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
-	if (!pool->cpu_comp_threads)
-		goto out_tasks;
-
-	for_each_present_cpu(cpu) {
-		struct ehca_cpu_comp_task *cct;
-
-		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-		spin_lock_init(&cct->task_lock);
-		INIT_LIST_HEAD(&cct->cq_list);
-	}
-
-	comp_pool_threads.store = pool->cpu_comp_threads;
-	ret = smpboot_register_percpu_thread(&comp_pool_threads);
-	if (ret)
-		goto out_threads;
-
-	pr_info("eHCA scaling code enabled\n");
-	return ret;
-
-out_threads:
-	free_percpu(pool->cpu_comp_threads);
-out_tasks:
-	free_percpu(pool->cpu_comp_tasks);
-out_pool:
-	kfree(pool);
-	return ret;
-}
-
-void ehca_destroy_comp_pool(void)
-{
-	if (!ehca_scaling_code)
-		return;
-
-	smpboot_unregister_percpu_thread(&comp_pool_threads);
-
-	free_percpu(pool->cpu_comp_threads);
-	free_percpu(pool->cpu_comp_tasks);
-	kfree(pool);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
deleted file mode 100644
index 5370199..0000000
--- a/drivers/staging/rdma/ehca/ehca_irq.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Function definitions and structs for EQs, NEQs and interrupts
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IRQ_H
-#define __EHCA_IRQ_H
-
-
-struct ehca_shca;
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
-void ehca_tasklet_neq(unsigned long data);
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
-void ehca_tasklet_eq(unsigned long data);
-void ehca_process_eq(struct ehca_shca *shca, int is_irq);
-
-struct ehca_cpu_comp_task {
-	struct list_head cq_list;
-	spinlock_t task_lock;
-	int cq_jobs;
-	int active;
-};
-
-struct ehca_comp_pool {
-	struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
-	struct task_struct * __percpu *cpu_comp_threads;
-	int last_cpu;
-	spinlock_t last_cpu_lock;
-};
-
-int ehca_create_comp_pool(void);
-void ehca_destroy_comp_pool(void);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
deleted file mode 100644
index cca5933..0000000
--- a/drivers/staging/rdma/ehca/ehca_iverbs.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Function definitions for internal functions
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Dietmar Decker <ddecker@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IVERBS_H__
-#define __EHCA_IVERBS_H__
-
-#include "ehca_classes.h"
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-		      struct ib_udata *uhw);
-
-int ehca_query_port(struct ib_device *ibdev, u8 port,
-		    struct ib_port_attr *props);
-
-enum rdma_protocol_type
-ehca_query_protocol(struct ib_device *device, u8 port_num);
-
-int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
-			struct ehca_sma_attr *attr);
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
-		   union ib_gid *gid);
-
-int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
-		     struct ib_port_modify *props);
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
-			    struct ib_ucontext *context,
-			    struct ib_udata *udata);
-
-int ehca_dealloc_pd(struct ib_pd *pd);
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_destroy_ah(struct ib_ah *ah);
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-			       u64 virt, int mr_access_flags,
-			       struct ib_udata *udata);
-
-int ehca_dereg_mr(struct ib_mr *mr);
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-
-int ehca_dealloc_mw(struct ib_mw *mw);
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
-			      int mr_access_flags,
-			      struct ib_fmr_attr *fmr_attr);
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
-		      u64 *page_list, int list_len, u64 iova);
-
-int ehca_unmap_fmr(struct list_head *fmr_list);
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr);
-
-enum ehca_eq_type {
-	EHCA_EQ = 0, /* Event Queue              */
-	EHCA_NEQ     /* Notification Event Queue */
-};
-
-int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
-		   enum ehca_eq_type type, const u32 length);
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
-			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *context,
-			     struct ib_udata *udata);
-
-int ehca_destroy_cq(struct ib_cq *cq);
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
-
-int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
-
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
-			     struct ib_qp_init_attr *init_attr,
-			     struct ib_udata *udata);
-
-int ehca_destroy_qp(struct ib_qp *qp);
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-		   struct ib_udata *udata);
-
-int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
-		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
-		   struct ib_send_wr **bad_send_wr);
-
-int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
-		   struct ib_recv_wr **bad_recv_wr);
-
-int ehca_post_srq_recv(struct ib_srq *srq,
-		       struct ib_recv_wr *recv_wr,
-		       struct ib_recv_wr **bad_recv_wr);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
-			       struct ib_srq_init_attr *init_attr,
-			       struct ib_udata *udata);
-
-int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
-		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-
-int ehca_destroy_srq(struct ib_srq *srq);
-
-u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
-		    struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
-					struct ib_udata *udata);
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context);
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-		     const struct ib_mad_hdr *in, size_t in_mad_size,
-		     struct ib_mad_hdr *out, size_t *out_mad_size,
-		     u16 *out_mad_pkey_index);
-
-void ehca_poll_eqs(unsigned long data);
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
-		  enum ib_rate path_rate, u32 *ipd);
-
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
-
-#ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(gfp_t flags);
-void ehca_free_fw_ctrlblock(void *ptr);
-#else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
-#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
-#endif
-
-void ehca_recover_sqp(struct ib_qp *sqp);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
deleted file mode 100644
index 832f22f..0000000
--- a/drivers/staging/rdma/ehca/ehca_main.c
+++ /dev/null
@@ -1,1118 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  module start stop, hca detection
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <linux/slab.h>
-#endif
-
-#include <linux/notifier.h>
-#include <linux/memory.h>
-#include <rdma/ib_mad.h>
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-#define HCAD_VERSION "0029"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
-MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION(HCAD_VERSION);
-
-static bool ehca_open_aqp1    = 0;
-static int ehca_hw_level      = 0;
-static bool ehca_poll_all_eqs = 1;
-
-int ehca_debug_level   = 0;
-int ehca_nr_ports      = -1;
-bool ehca_use_hp_mr    = 0;
-int ehca_port_act_time = 30;
-int ehca_static_rate   = -1;
-bool ehca_scaling_code = 0;
-int ehca_lock_hcalls   = -1;
-int ehca_max_cq        = -1;
-int ehca_max_qp        = -1;
-
-module_param_named(open_aqp1,     ehca_open_aqp1,     bool, S_IRUGO);
-module_param_named(debug_level,   ehca_debug_level,   int,  S_IRUGO);
-module_param_named(hw_level,      ehca_hw_level,      int,  S_IRUGO);
-module_param_named(nr_ports,      ehca_nr_ports,      int,  S_IRUGO);
-module_param_named(use_hp_mr,     ehca_use_hp_mr,     bool, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int,  S_IRUGO);
-module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  bool, S_IRUGO);
-module_param_named(static_rate,   ehca_static_rate,   int,  S_IRUGO);
-module_param_named(scaling_code,  ehca_scaling_code,  bool, S_IRUGO);
-module_param_named(lock_hcalls,   ehca_lock_hcalls,   bint, S_IRUGO);
-module_param_named(number_of_cqs, ehca_max_cq,        int,  S_IRUGO);
-module_param_named(number_of_qps, ehca_max_qp,        int,  S_IRUGO);
-
-MODULE_PARM_DESC(open_aqp1,
-		 "Open AQP1 on startup (default: no)");
-MODULE_PARM_DESC(debug_level,
-		 "Amount of debug output (0: none (default), 1: traces, "
-		 "2: some dumps, 3: lots)");
-MODULE_PARM_DESC(hw_level,
-		 "Hardware level (0: autosensing (default), "
-		 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
-MODULE_PARM_DESC(nr_ports,
-		 "number of connected ports (-1: autodetect (default), "
-		 "1: port one only, 2: two ports)");
-MODULE_PARM_DESC(use_hp_mr,
-		 "Use high performance MRs (default: no)");
-MODULE_PARM_DESC(port_act_time,
-		 "Time to wait for port activation (default: 30 sec)");
-MODULE_PARM_DESC(poll_all_eqs,
-		 "Poll all event queues periodically (default: yes)");
-MODULE_PARM_DESC(static_rate,
-		 "Set permanent static rate (default: no static rate)");
-MODULE_PARM_DESC(scaling_code,
-		 "Enable scaling code (default: no)");
-MODULE_PARM_DESC(lock_hcalls,
-		 "Serialize all hCalls made by the driver "
-		 "(default: autodetect)");
-MODULE_PARM_DESC(number_of_cqs,
-		"Max number of CQs which can be allocated "
-		"(default: autodetect)");
-MODULE_PARM_DESC(number_of_qps,
-		"Max number of QPs which can be allocated "
-		"(default: autodetect)");
-
-DEFINE_RWLOCK(ehca_qp_idr_lock);
-DEFINE_RWLOCK(ehca_cq_idr_lock);
-DEFINE_IDR(ehca_qp_idr);
-DEFINE_IDR(ehca_cq_idr);
-
-static LIST_HEAD(shca_list); /* list of all registered ehcas */
-DEFINE_SPINLOCK(shca_list_lock);
-
-static struct timer_list poll_eqs_timer;
-
-#ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache;
-
-void *ehca_alloc_fw_ctrlblock(gfp_t flags)
-{
-	void *ret = kmem_cache_zalloc(ctblk_cache, flags);
-	if (!ret)
-		ehca_gen_err("Out of memory for ctblk");
-	return ret;
-}
-
-void ehca_free_fw_ctrlblock(void *ptr)
-{
-	if (ptr)
-		kmem_cache_free(ctblk_cache, ptr);
-
-}
-#endif
-
-int ehca2ib_return_code(u64 ehca_rc)
-{
-	switch (ehca_rc) {
-	case H_SUCCESS:
-		return 0;
-	case H_RESOURCE:             /* Resource in use */
-	case H_BUSY:
-		return -EBUSY;
-	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
-	case H_CONSTRAINED:          /* resource constraint */
-	case H_NO_MEM:
-		return -ENOMEM;
-	default:
-		return -EINVAL;
-	}
-}
-
-static int ehca_create_slab_caches(void)
-{
-	int ret;
-
-	ret = ehca_init_pd_cache();
-	if (ret) {
-		ehca_gen_err("Cannot create PD SLAB cache.");
-		return ret;
-	}
-
-	ret = ehca_init_cq_cache();
-	if (ret) {
-		ehca_gen_err("Cannot create CQ SLAB cache.");
-		goto create_slab_caches2;
-	}
-
-	ret = ehca_init_qp_cache();
-	if (ret) {
-		ehca_gen_err("Cannot create QP SLAB cache.");
-		goto create_slab_caches3;
-	}
-
-	ret = ehca_init_av_cache();
-	if (ret) {
-		ehca_gen_err("Cannot create AV SLAB cache.");
-		goto create_slab_caches4;
-	}
-
-	ret = ehca_init_mrmw_cache();
-	if (ret) {
-		ehca_gen_err("Cannot create MR&MW SLAB cache.");
-		goto create_slab_caches5;
-	}
-
-	ret = ehca_init_small_qp_cache();
-	if (ret) {
-		ehca_gen_err("Cannot create small queue SLAB cache.");
-		goto create_slab_caches6;
-	}
-
-#ifdef CONFIG_PPC_64K_PAGES
-	ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
-					EHCA_PAGESIZE, H_CB_ALIGNMENT,
-					SLAB_HWCACHE_ALIGN,
-					NULL);
-	if (!ctblk_cache) {
-		ehca_gen_err("Cannot create ctblk SLAB cache.");
-		ehca_cleanup_small_qp_cache();
-		ret = -ENOMEM;
-		goto create_slab_caches6;
-	}
-#endif
-	return 0;
-
-create_slab_caches6:
-	ehca_cleanup_mrmw_cache();
-
-create_slab_caches5:
-	ehca_cleanup_av_cache();
-
-create_slab_caches4:
-	ehca_cleanup_qp_cache();
-
-create_slab_caches3:
-	ehca_cleanup_cq_cache();
-
-create_slab_caches2:
-	ehca_cleanup_pd_cache();
-
-	return ret;
-}
-
-static void ehca_destroy_slab_caches(void)
-{
-	ehca_cleanup_small_qp_cache();
-	ehca_cleanup_mrmw_cache();
-	ehca_cleanup_av_cache();
-	ehca_cleanup_qp_cache();
-	ehca_cleanup_cq_cache();
-	ehca_cleanup_pd_cache();
-#ifdef CONFIG_PPC_64K_PAGES
-	kmem_cache_destroy(ctblk_cache);
-#endif
-}
-
-#define EHCA_HCAAVER  EHCA_BMASK_IBM(32, 39)
-#define EHCA_REVID    EHCA_BMASK_IBM(40, 63)
-
-static struct cap_descr {
-	u64 mask;
-	char *descr;
-} hca_cap_descr[] = {
-	{ HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
-	{ HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
-	{ HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
-	{ HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
-	{ HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
-	{ HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
-	{ HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
-	{ HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
-	{ HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
-	{ HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
-	{ HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
-	{ HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
-	{ HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
-	{ HCA_CAP_SRQ, "HCA_CAP_SRQ" },
-	{ HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
-	{ HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
-	{ HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
-	{ HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
-};
-
-static int ehca_sense_attributes(struct ehca_shca *shca)
-{
-	int i, ret = 0;
-	u64 h_ret;
-	struct hipz_query_hca *rblock;
-	struct hipz_query_port *port;
-	const char *loc_code;
-
-	static const u32 pgsize_map[] = {
-		HCA_CAP_MR_PGSIZE_4K,  0x1000,
-		HCA_CAP_MR_PGSIZE_64K, 0x10000,
-		HCA_CAP_MR_PGSIZE_1M,  0x100000,
-		HCA_CAP_MR_PGSIZE_16M, 0x1000000,
-	};
-
-	ehca_gen_dbg("Probing adapter %s...",
-		     shca->ofdev->dev.of_node->full_name);
-	loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
-				   NULL);
-	if (loc_code)
-		ehca_gen_dbg(" ... location code=%s", loc_code);
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!rblock) {
-		ehca_gen_err("Cannot allocate rblock memory.");
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
-	if (h_ret != H_SUCCESS) {
-		ehca_gen_err("Cannot query device properties. h_ret=%lli",
-			     h_ret);
-		ret = -EPERM;
-		goto sense_attributes1;
-	}
-
-	if (ehca_nr_ports == 1)
-		shca->num_ports = 1;
-	else
-		shca->num_ports = (u8)rblock->num_ports;
-
-	ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
-
-	if (ehca_hw_level == 0) {
-		u32 hcaaver;
-		u32 revid;
-
-		hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
-		revid   = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
-
-		ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
-
-		if (hcaaver == 1) {
-			if (revid <= 3)
-				shca->hw_level = 0x10 | (revid + 1);
-			else
-				shca->hw_level = 0x14;
-		} else if (hcaaver == 2) {
-			if (revid == 0)
-				shca->hw_level = 0x21;
-			else if (revid == 0x10)
-				shca->hw_level = 0x22;
-			else if (revid == 0x20 || revid == 0x21)
-				shca->hw_level = 0x23;
-		}
-
-		if (!shca->hw_level) {
-			ehca_gen_warn("unknown hardware version"
-				      " - assuming default level");
-			shca->hw_level = 0x22;
-		}
-	} else
-		shca->hw_level = ehca_hw_level;
-	ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
-
-	shca->hca_cap = rblock->hca_cap_indicators;
-	ehca_gen_dbg(" ... HCA capabilities:");
-	for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
-		if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
-			ehca_gen_dbg("   %s", hca_cap_descr[i].descr);
-
-	/* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
-	 * a firmware property, so it's valid across all adapters
-	 */
-	if (ehca_lock_hcalls == -1)
-		ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
-					shca->hca_cap);
-
-	/* translate supported MR page sizes; always support 4K */
-	shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
-	for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
-		if (rblock->memory_page_size_supported & pgsize_map[i])
-			shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
-
-	/* Set maximum number of CQs and QPs to calculate EQ size */
-	if (shca->max_num_qps == -1)
-		shca->max_num_qps = min_t(int, rblock->max_qp,
-					  EHCA_MAX_NUM_QUEUES);
-	else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
-		ehca_gen_warn("The requested number of QPs is out of range "
-			      "(1 - %i) specified by HW. Value is set to %i",
-			      rblock->max_qp, rblock->max_qp);
-		shca->max_num_qps = rblock->max_qp;
-	}
-
-	if (shca->max_num_cqs == -1)
-		shca->max_num_cqs = min_t(int, rblock->max_cq,
-					  EHCA_MAX_NUM_QUEUES);
-	else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
-		ehca_gen_warn("The requested number of CQs is out of range "
-			      "(1 - %i) specified by HW. Value is set to %i",
-			      rblock->max_cq, rblock->max_cq);
-		shca->max_num_cqs = rblock->max_cq;
-	}
-
-	/* query max MTU from first port -- it's the same for all ports */
-	port = (struct hipz_query_port *)rblock;
-	h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
-	if (h_ret != H_SUCCESS) {
-		ehca_gen_err("Cannot query port properties. h_ret=%lli",
-			     h_ret);
-		ret = -EPERM;
-		goto sense_attributes1;
-	}
-
-	shca->max_mtu = port->max_mtu;
-
-sense_attributes1:
-	ehca_free_fw_ctrlblock(rblock);
-	return ret;
-}
-
-static int init_node_guid(struct ehca_shca *shca)
-{
-	int ret = 0;
-	struct hipz_query_hca *rblock;
-
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!rblock) {
-		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-		return -ENOMEM;
-	}
-
-	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Can't query device properties");
-		ret = -EINVAL;
-		goto init_node_guid1;
-	}
-
-	memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
-
-init_node_guid1:
-	ehca_free_fw_ctrlblock(rblock);
-	return ret;
-}
-
-static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
-			       struct ib_port_immutable *immutable)
-{
-	struct ib_port_attr attr;
-	int err;
-
-	err = ehca_query_port(ibdev, port_num, &attr);
-	if (err)
-		return err;
-
-	immutable->pkey_tbl_len = attr.pkey_tbl_len;
-	immutable->gid_tbl_len = attr.gid_tbl_len;
-	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
-	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-	return 0;
-}
-
-static int ehca_init_device(struct ehca_shca *shca)
-{
-	int ret;
-
-	ret = init_node_guid(shca);
-	if (ret)
-		return ret;
-
-	strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
-	shca->ib_device.owner               = THIS_MODULE;
-
-	shca->ib_device.uverbs_abi_ver	    = 8;
-	shca->ib_device.uverbs_cmd_mask	    =
-		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
-		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
-		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
-		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
-		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
-		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
-		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
-		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
-		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
-		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
-		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
-		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
-		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
-		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
-		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
-		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
-
-	shca->ib_device.node_type           = RDMA_NODE_IB_CA;
-	shca->ib_device.phys_port_cnt       = shca->num_ports;
-	shca->ib_device.num_comp_vectors    = 1;
-	shca->ib_device.dma_device          = &shca->ofdev->dev;
-	shca->ib_device.query_device        = ehca_query_device;
-	shca->ib_device.query_port          = ehca_query_port;
-	shca->ib_device.query_gid           = ehca_query_gid;
-	shca->ib_device.query_pkey          = ehca_query_pkey;
-	/* shca->ib_device.modify_device    = ehca_modify_device    */
-	shca->ib_device.modify_port         = ehca_modify_port;
-	shca->ib_device.alloc_ucontext      = ehca_alloc_ucontext;
-	shca->ib_device.dealloc_ucontext    = ehca_dealloc_ucontext;
-	shca->ib_device.alloc_pd            = ehca_alloc_pd;
-	shca->ib_device.dealloc_pd          = ehca_dealloc_pd;
-	shca->ib_device.create_ah	    = ehca_create_ah;
-	/* shca->ib_device.modify_ah	    = ehca_modify_ah;	    */
-	shca->ib_device.query_ah	    = ehca_query_ah;
-	shca->ib_device.destroy_ah	    = ehca_destroy_ah;
-	shca->ib_device.create_qp	    = ehca_create_qp;
-	shca->ib_device.modify_qp	    = ehca_modify_qp;
-	shca->ib_device.query_qp	    = ehca_query_qp;
-	shca->ib_device.destroy_qp	    = ehca_destroy_qp;
-	shca->ib_device.post_send	    = ehca_post_send;
-	shca->ib_device.post_recv	    = ehca_post_recv;
-	shca->ib_device.create_cq	    = ehca_create_cq;
-	shca->ib_device.destroy_cq	    = ehca_destroy_cq;
-	shca->ib_device.resize_cq	    = ehca_resize_cq;
-	shca->ib_device.poll_cq		    = ehca_poll_cq;
-	/* shca->ib_device.peek_cq	    = ehca_peek_cq;	    */
-	shca->ib_device.req_notify_cq	    = ehca_req_notify_cq;
-	/* shca->ib_device.req_ncomp_notif  = ehca_req_ncomp_notif; */
-	shca->ib_device.get_dma_mr	    = ehca_get_dma_mr;
-	shca->ib_device.reg_user_mr	    = ehca_reg_user_mr;
-	shca->ib_device.dereg_mr	    = ehca_dereg_mr;
-	shca->ib_device.alloc_mw	    = ehca_alloc_mw;
-	shca->ib_device.dealloc_mw	    = ehca_dealloc_mw;
-	shca->ib_device.alloc_fmr	    = ehca_alloc_fmr;
-	shca->ib_device.map_phys_fmr	    = ehca_map_phys_fmr;
-	shca->ib_device.unmap_fmr	    = ehca_unmap_fmr;
-	shca->ib_device.dealloc_fmr	    = ehca_dealloc_fmr;
-	shca->ib_device.attach_mcast	    = ehca_attach_mcast;
-	shca->ib_device.detach_mcast	    = ehca_detach_mcast;
-	shca->ib_device.process_mad	    = ehca_process_mad;
-	shca->ib_device.mmap		    = ehca_mmap;
-	shca->ib_device.dma_ops		    = &ehca_dma_mapping_ops;
-	shca->ib_device.get_port_immutable  = ehca_port_immutable;
-
-	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-		shca->ib_device.uverbs_cmd_mask |=
-			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
-			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
-			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
-			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
-
-		shca->ib_device.create_srq          = ehca_create_srq;
-		shca->ib_device.modify_srq          = ehca_modify_srq;
-		shca->ib_device.query_srq           = ehca_query_srq;
-		shca->ib_device.destroy_srq         = ehca_destroy_srq;
-		shca->ib_device.post_srq_recv       = ehca_post_srq_recv;
-	}
-
-	return ret;
-}
-
-static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
-{
-	struct ehca_sport *sport = &shca->sport[port - 1];
-	struct ib_cq *ibcq;
-	struct ib_qp *ibqp;
-	struct ib_qp_init_attr qp_init_attr;
-	struct ib_cq_init_attr cq_attr = {};
-	int ret;
-
-	if (sport->ibcq_aqp1) {
-		ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
-		return -EPERM;
-	}
-
-	cq_attr.cqe = 10;
-	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
-			    &cq_attr);
-	if (IS_ERR(ibcq)) {
-		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
-		return PTR_ERR(ibcq);
-	}
-	sport->ibcq_aqp1 = ibcq;
-
-	if (sport->ibqp_sqp[IB_QPT_GSI]) {
-		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
-		ret = -EPERM;
-		goto create_aqp1;
-	}
-
-	memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
-	qp_init_attr.send_cq          = ibcq;
-	qp_init_attr.recv_cq          = ibcq;
-	qp_init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
-	qp_init_attr.cap.max_send_wr  = 100;
-	qp_init_attr.cap.max_recv_wr  = 100;
-	qp_init_attr.cap.max_send_sge = 2;
-	qp_init_attr.cap.max_recv_sge = 1;
-	qp_init_attr.qp_type          = IB_QPT_GSI;
-	qp_init_attr.port_num         = port;
-	qp_init_attr.qp_context       = NULL;
-	qp_init_attr.event_handler    = NULL;
-	qp_init_attr.srq              = NULL;
-
-	ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
-	if (IS_ERR(ibqp)) {
-		ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
-		ret = PTR_ERR(ibqp);
-		goto create_aqp1;
-	}
-	sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
-
-	return 0;
-
-create_aqp1:
-	ib_destroy_cq(sport->ibcq_aqp1);
-	return ret;
-}
-
-static int ehca_destroy_aqp1(struct ehca_sport *sport)
-{
-	int ret;
-
-	ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
-	if (ret) {
-		ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
-		return ret;
-	}
-
-	ret = ib_destroy_cq(sport->ibcq_aqp1);
-	if (ret)
-		ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
-
-	return ret;
-}
-
-static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
-}
-
-static ssize_t ehca_store_debug_level(struct device_driver *ddp,
-				      const char *buf, size_t count)
-{
-	int value = (*buf) - '0';
-	if (value >= 0 && value <= 9)
-		ehca_debug_level = value;
-	return 1;
-}
-
-static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
-		   ehca_show_debug_level, ehca_store_debug_level);
-
-static struct attribute *ehca_drv_attrs[] = {
-	&driver_attr_debug_level.attr,
-	NULL
-};
-
-static struct attribute_group ehca_drv_attr_grp = {
-	.attrs = ehca_drv_attrs
-};
-
-static const struct attribute_group *ehca_drv_attr_groups[] = {
-	&ehca_drv_attr_grp,
-	NULL,
-};
-
-#define EHCA_RESOURCE_ATTR(name)                                           \
-static ssize_t  ehca_show_##name(struct device *dev,                       \
-				 struct device_attribute *attr,            \
-				 char *buf)                                \
-{									   \
-	struct ehca_shca *shca;						   \
-	struct hipz_query_hca *rblock;				           \
-	int data;                                                          \
-									   \
-	shca = dev_get_drvdata(dev);					   \
-									   \
-	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);			   \
-	if (!rblock) {						           \
-		dev_err(dev, "Can't allocate rblock memory.\n");           \
-		return 0;						   \
-	}								   \
-									   \
-	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
-		dev_err(dev, "Can't query device properties\n");           \
-		ehca_free_fw_ctrlblock(rblock);			   	   \
-		return 0;					   	   \
-	}								   \
-									   \
-	data = rblock->name;                                               \
-	ehca_free_fw_ctrlblock(rblock);                                    \
-									   \
-	if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1))	   \
-		return snprintf(buf, 256, "1\n");			   \
-	else								   \
-		return snprintf(buf, 256, "%d\n", data);		   \
-									   \
-}									   \
-static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
-
-EHCA_RESOURCE_ATTR(num_ports);
-EHCA_RESOURCE_ATTR(hw_ver);
-EHCA_RESOURCE_ATTR(max_eq);
-EHCA_RESOURCE_ATTR(cur_eq);
-EHCA_RESOURCE_ATTR(max_cq);
-EHCA_RESOURCE_ATTR(cur_cq);
-EHCA_RESOURCE_ATTR(max_qp);
-EHCA_RESOURCE_ATTR(cur_qp);
-EHCA_RESOURCE_ATTR(max_mr);
-EHCA_RESOURCE_ATTR(cur_mr);
-EHCA_RESOURCE_ATTR(max_mw);
-EHCA_RESOURCE_ATTR(cur_mw);
-EHCA_RESOURCE_ATTR(max_pd);
-EHCA_RESOURCE_ATTR(max_ah);
-
-static ssize_t ehca_show_adapter_handle(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct ehca_shca *shca = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
-
-}
-static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
-
-static struct attribute *ehca_dev_attrs[] = {
-	&dev_attr_adapter_handle.attr,
-	&dev_attr_num_ports.attr,
-	&dev_attr_hw_ver.attr,
-	&dev_attr_max_eq.attr,
-	&dev_attr_cur_eq.attr,
-	&dev_attr_max_cq.attr,
-	&dev_attr_cur_cq.attr,
-	&dev_attr_max_qp.attr,
-	&dev_attr_cur_qp.attr,
-	&dev_attr_max_mr.attr,
-	&dev_attr_cur_mr.attr,
-	&dev_attr_max_mw.attr,
-	&dev_attr_cur_mw.attr,
-	&dev_attr_max_pd.attr,
-	&dev_attr_max_ah.attr,
-	NULL
-};
-
-static struct attribute_group ehca_dev_attr_grp = {
-	.attrs = ehca_dev_attrs
-};
-
-static int ehca_probe(struct platform_device *dev)
-{
-	struct ehca_shca *shca;
-	const u64 *handle;
-	struct ib_pd *ibpd;
-	int ret, i, eq_size;
-	unsigned long flags;
-
-	handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
-	if (!handle) {
-		ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
-			     dev->dev.of_node->full_name);
-		return -ENODEV;
-	}
-
-	if (!(*handle)) {
-		ehca_gen_err("Wrong eHCA handle for adapter: %s.",
-			     dev->dev.of_node->full_name);
-		return -ENODEV;
-	}
-
-	shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
-	if (!shca) {
-		ehca_gen_err("Cannot allocate shca memory.");
-		return -ENOMEM;
-	}
-
-	mutex_init(&shca->modify_mutex);
-	atomic_set(&shca->num_cqs, 0);
-	atomic_set(&shca->num_qps, 0);
-	shca->max_num_qps = ehca_max_qp;
-	shca->max_num_cqs = ehca_max_cq;
-
-	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
-		spin_lock_init(&shca->sport[i].mod_sqp_lock);
-
-	shca->ofdev = dev;
-	shca->ipz_hca_handle.handle = *handle;
-	dev_set_drvdata(&dev->dev, shca);
-
-	ret = ehca_sense_attributes(shca);
-	if (ret < 0) {
-		ehca_gen_err("Cannot sense eHCA attributes.");
-		goto probe1;
-	}
-
-	ret = ehca_init_device(shca);
-	if (ret) {
-		ehca_gen_err("Cannot init ehca  device struct");
-		goto probe1;
-	}
-
-	eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
-	/* create event queues */
-	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
-	if (ret) {
-		ehca_err(&shca->ib_device, "Cannot create EQ.");
-		goto probe1;
-	}
-
-	ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
-	if (ret) {
-		ehca_err(&shca->ib_device, "Cannot create NEQ.");
-		goto probe3;
-	}
-
-	/* create internal protection domain */
-	ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
-	if (IS_ERR(ibpd)) {
-		ehca_err(&shca->ib_device, "Cannot create internal PD.");
-		ret = PTR_ERR(ibpd);
-		goto probe4;
-	}
-
-	shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
-	shca->pd->ib_pd.device = &shca->ib_device;
-
-	/* create internal max MR */
-	ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
-
-	if (ret) {
-		ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
-			 ret);
-		goto probe5;
-	}
-
-	ret = ib_register_device(&shca->ib_device, NULL);
-	if (ret) {
-		ehca_err(&shca->ib_device,
-			 "ib_register_device() failed ret=%i", ret);
-		goto probe6;
-	}
-
-	/* create AQP1 for port 1 */
-	if (ehca_open_aqp1 == 1) {
-		shca->sport[0].port_state = IB_PORT_DOWN;
-		ret = ehca_create_aqp1(shca, 1);
-		if (ret) {
-			ehca_err(&shca->ib_device,
-				 "Cannot create AQP1 for port 1.");
-			goto probe7;
-		}
-	}
-
-	/* create AQP1 for port 2 */
-	if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
-		shca->sport[1].port_state = IB_PORT_DOWN;
-		ret = ehca_create_aqp1(shca, 2);
-		if (ret) {
-			ehca_err(&shca->ib_device,
-				 "Cannot create AQP1 for port 2.");
-			goto probe8;
-		}
-	}
-
-	ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-	if (ret) /* only complain; we can live without attributes */
-		ehca_err(&shca->ib_device,
-			 "Cannot create device attributes  ret=%d", ret);
-
-	spin_lock_irqsave(&shca_list_lock, flags);
-	list_add(&shca->shca_list, &shca_list);
-	spin_unlock_irqrestore(&shca_list_lock, flags);
-
-	return 0;
-
-probe8:
-	ret = ehca_destroy_aqp1(&shca->sport[0]);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "Cannot destroy AQP1 for port 1. ret=%i", ret);
-
-probe7:
-	ib_unregister_device(&shca->ib_device);
-
-probe6:
-	ret = ehca_dereg_internal_maxmr(shca);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "Cannot destroy internal MR. ret=%x", ret);
-
-probe5:
-	ret = ehca_dealloc_pd(&shca->pd->ib_pd);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "Cannot destroy internal PD. ret=%x", ret);
-
-probe4:
-	ret = ehca_destroy_eq(shca, &shca->neq);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "Cannot destroy NEQ. ret=%x", ret);
-
-probe3:
-	ret = ehca_destroy_eq(shca, &shca->eq);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "Cannot destroy EQ. ret=%x", ret);
-
-probe1:
-	ib_dealloc_device(&shca->ib_device);
-
-	return -EINVAL;
-}
-
-static int ehca_remove(struct platform_device *dev)
-{
-	struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
-	unsigned long flags;
-	int ret;
-
-	sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-
-	if (ehca_open_aqp1 == 1) {
-		int i;
-		for (i = 0; i < shca->num_ports; i++) {
-			ret = ehca_destroy_aqp1(&shca->sport[i]);
-			if (ret)
-				ehca_err(&shca->ib_device,
-					 "Cannot destroy AQP1 for port %x "
-					 "ret=%i", ret, i);
-		}
-	}
-
-	ib_unregister_device(&shca->ib_device);
-
-	ret = ehca_dereg_internal_maxmr(shca);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "Cannot destroy internal MR. ret=%i", ret);
-
-	ret = ehca_dealloc_pd(&shca->pd->ib_pd);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "Cannot destroy internal PD. ret=%i", ret);
-
-	ret = ehca_destroy_eq(shca, &shca->eq);
-	if (ret)
-		ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
-
-	ret = ehca_destroy_eq(shca, &shca->neq);
-	if (ret)
-		ehca_err(&shca->ib_device, "Canot destroy NEQ. ret=%i", ret);
-
-	ib_dealloc_device(&shca->ib_device);
-
-	spin_lock_irqsave(&shca_list_lock, flags);
-	list_del(&shca->shca_list);
-	spin_unlock_irqrestore(&shca_list_lock, flags);
-
-	return ret;
-}
-
-static struct of_device_id ehca_device_table[] =
-{
-	{
-		.name       = "lhca",
-		.compatible = "IBM,lhca",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, ehca_device_table);
-
-static struct platform_driver ehca_driver = {
-	.probe       = ehca_probe,
-	.remove      = ehca_remove,
-	.driver = {
-		.name = "ehca",
-		.owner = THIS_MODULE,
-		.groups = ehca_drv_attr_groups,
-		.of_match_table = ehca_device_table,
-	},
-};
-
-void ehca_poll_eqs(unsigned long data)
-{
-	struct ehca_shca *shca;
-
-	spin_lock(&shca_list_lock);
-	list_for_each_entry(shca, &shca_list, shca_list) {
-		if (shca->eq.is_initialized) {
-			/* call deadman proc only if eq ptr does not change */
-			struct ehca_eq *eq = &shca->eq;
-			int max = 3;
-			volatile u64 q_ofs, q_ofs2;
-			unsigned long flags;
-			spin_lock_irqsave(&eq->spinlock, flags);
-			q_ofs = eq->ipz_queue.current_q_offset;
-			spin_unlock_irqrestore(&eq->spinlock, flags);
-			do {
-				spin_lock_irqsave(&eq->spinlock, flags);
-				q_ofs2 = eq->ipz_queue.current_q_offset;
-				spin_unlock_irqrestore(&eq->spinlock, flags);
-				max--;
-			} while (q_ofs == q_ofs2 && max > 0);
-			if (q_ofs == q_ofs2)
-				ehca_process_eq(shca, 0);
-		}
-	}
-	mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
-	spin_unlock(&shca_list_lock);
-}
-
-static int ehca_mem_notifier(struct notifier_block *nb,
-			     unsigned long action, void *data)
-{
-	static unsigned long ehca_dmem_warn_time;
-	unsigned long flags;
-
-	switch (action) {
-	case MEM_CANCEL_OFFLINE:
-	case MEM_CANCEL_ONLINE:
-	case MEM_ONLINE:
-	case MEM_OFFLINE:
-		return NOTIFY_OK;
-	case MEM_GOING_ONLINE:
-	case MEM_GOING_OFFLINE:
-		/* only ok if no hca is attached to the lpar */
-		spin_lock_irqsave(&shca_list_lock, flags);
-		if (list_empty(&shca_list)) {
-			spin_unlock_irqrestore(&shca_list_lock, flags);
-			return NOTIFY_OK;
-		} else {
-			spin_unlock_irqrestore(&shca_list_lock, flags);
-			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
-						   30 * 1000))
-				ehca_gen_err("DMEM operations are not allowed"
-					     "in conjunction with eHCA");
-			return NOTIFY_BAD;
-		}
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block ehca_mem_nb = {
-	.notifier_call = ehca_mem_notifier,
-};
-
-static int __init ehca_module_init(void)
-{
-	int ret;
-
-	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Version " HCAD_VERSION ")\n");
-
-	ret = ehca_create_comp_pool();
-	if (ret) {
-		ehca_gen_err("Cannot create comp pool.");
-		return ret;
-	}
-
-	ret = ehca_create_slab_caches();
-	if (ret) {
-		ehca_gen_err("Cannot create SLAB caches");
-		ret = -ENOMEM;
-		goto module_init1;
-	}
-
-	ret = ehca_create_busmap();
-	if (ret) {
-		ehca_gen_err("Cannot create busmap.");
-		goto module_init2;
-	}
-
-	ret = ibmebus_register_driver(&ehca_driver);
-	if (ret) {
-		ehca_gen_err("Cannot register eHCA device driver");
-		ret = -EINVAL;
-		goto module_init3;
-	}
-
-	ret = register_memory_notifier(&ehca_mem_nb);
-	if (ret) {
-		ehca_gen_err("Failed registering memory add/remove notifier");
-		goto module_init4;
-	}
-
-	if (ehca_poll_all_eqs != 1) {
-		ehca_gen_err("WARNING!!!");
-		ehca_gen_err("It is possible to lose interrupts.");
-	} else {
-		init_timer(&poll_eqs_timer);
-		poll_eqs_timer.function = ehca_poll_eqs;
-		poll_eqs_timer.expires = jiffies + HZ;
-		add_timer(&poll_eqs_timer);
-	}
-
-	return 0;
-
-module_init4:
-	ibmebus_unregister_driver(&ehca_driver);
-
-module_init3:
-	ehca_destroy_busmap();
-
-module_init2:
-	ehca_destroy_slab_caches();
-
-module_init1:
-	ehca_destroy_comp_pool();
-	return ret;
-}
-
-static void __exit ehca_module_exit(void)
-{
-	if (ehca_poll_all_eqs == 1)
-		del_timer_sync(&poll_eqs_timer);
-
-	ibmebus_unregister_driver(&ehca_driver);
-
-	unregister_memory_notifier(&ehca_mem_nb);
-
-	ehca_destroy_busmap();
-
-	ehca_destroy_slab_caches();
-
-	ehca_destroy_comp_pool();
-
-	idr_destroy(&ehca_cq_idr);
-	idr_destroy(&ehca_qp_idr);
-}
-
-module_init(ehca_module_init);
-module_exit(ehca_module_exit);
diff --git a/drivers/staging/rdma/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
deleted file mode 100644
index cec1815..0000000
--- a/drivers/staging/rdma/ehca/ehca_mcast.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  mcast  functions
- *
- *  Authors: Khadija Souissi <souissik@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define MAX_MC_LID 0xFFFE
-#define MIN_MC_LID 0xC000	/* Multicast limits */
-#define EHCA_VALID_MULTICAST_GID(gid)  ((gid)[0] == 0xFF)
-#define EHCA_VALID_MULTICAST_LID(lid) \
-	(((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
-
-int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
-					      ib_device);
-	union ib_gid my_gid;
-	u64 subnet_prefix, interface_id, h_ret;
-
-	if (ibqp->qp_type != IB_QPT_UD) {
-		ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
-		return -EINVAL;
-	}
-
-	if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		ehca_err(ibqp->device, "invalid mulitcast gid");
-		return -EINVAL;
-	} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
-		return -EINVAL;
-	}
-
-	memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
-	subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
-	interface_id = be64_to_cpu(my_gid.global.interface_id);
-	h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
-				   my_qp->ipz_qp_handle,
-				   my_qp->galpas.kernel,
-				   lid, subnet_prefix, interface_id);
-	if (h_ret != H_SUCCESS)
-		ehca_err(ibqp->device,
-			 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
-			 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
-	return ehca2ib_return_code(h_ret);
-}
-
-int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	struct ehca_shca *shca = container_of(ibqp->pd->device,
-					      struct ehca_shca, ib_device);
-	union ib_gid my_gid;
-	u64 subnet_prefix, interface_id, h_ret;
-
-	if (ibqp->qp_type != IB_QPT_UD) {
-		ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
-		return -EINVAL;
-	}
-
-	if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		ehca_err(ibqp->device, "invalid mulitcast gid");
-		return -EINVAL;
-	} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
-		return -EINVAL;
-	}
-
-	memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
-	subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
-	interface_id = be64_to_cpu(my_gid.global.interface_id);
-	h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
-				   my_qp->ipz_qp_handle,
-				   my_qp->galpas.kernel,
-				   lid, subnet_prefix, interface_id);
-	if (h_ret != H_SUCCESS)
-		ehca_err(ibqp->device,
-			 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
-			 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
-	return ehca2ib_return_code(h_ret);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
deleted file mode 100644
index 3367205..0000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ /dev/null
@@ -1,2202 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  MR/MW functions
- *
- *  Authors: Dietmar Decker <ddecker@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <rdma/ib_umem.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "hcp_if.h"
-#include "hipz_hw.h"
-
-#define NUM_CHUNKS(length, chunk_size) \
-	(((length) + (chunk_size - 1)) / (chunk_size))
-
-/* max number of rpages (per hcall register_rpages) */
-#define MAX_RPAGES 512
-
-/* DMEM toleration management */
-#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
-#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
-#define EHCA_HUGEPAGESHIFT     34
-#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
-#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
-#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
-#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
-#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
-#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
-#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
-#define EHCA_DIR_MAP_SIZE (0x10000)
-#define EHCA_ENT_MAP_SIZE (0x10000)
-#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
-
-static unsigned long ehca_mr_len;
-
-/*
- * Memory map data structures
- */
-struct ehca_dir_bmap {
-	u64 ent[EHCA_MAP_ENTRIES];
-};
-struct ehca_top_bmap {
-	struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
-};
-struct ehca_bmap {
-	struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
-};
-
-static struct ehca_bmap *ehca_bmap;
-
-static struct kmem_cache *mr_cache;
-static struct kmem_cache *mw_cache;
-
-enum ehca_mr_pgsize {
-	EHCA_MR_PGSIZE4K  = 0x1000L,
-	EHCA_MR_PGSIZE64K = 0x10000L,
-	EHCA_MR_PGSIZE1M  = 0x100000L,
-	EHCA_MR_PGSIZE16M = 0x1000000L
-};
-
-#define EHCA_MR_PGSHIFT4K  12
-#define EHCA_MR_PGSHIFT64K 16
-#define EHCA_MR_PGSHIFT1M  20
-#define EHCA_MR_PGSHIFT16M 24
-
-static u64 ehca_map_vaddr(void *caddr);
-
-static u32 ehca_encode_hwpage_size(u32 pgsize)
-{
-	int log = ilog2(pgsize);
-	WARN_ON(log < 12 || log > 24 || log & 3);
-	return (log - 12) / 4;
-}
-
-static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
-{
-	return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
-}
-
-static struct ehca_mr *ehca_mr_new(void)
-{
-	struct ehca_mr *me;
-
-	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
-	if (me)
-		spin_lock_init(&me->mrlock);
-	else
-		ehca_gen_err("alloc failed");
-
-	return me;
-}
-
-static void ehca_mr_delete(struct ehca_mr *me)
-{
-	kmem_cache_free(mr_cache, me);
-}
-
-static struct ehca_mw *ehca_mw_new(void)
-{
-	struct ehca_mw *me;
-
-	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
-	if (me)
-		spin_lock_init(&me->mwlock);
-	else
-		ehca_gen_err("alloc failed");
-
-	return me;
-}
-
-static void ehca_mw_delete(struct ehca_mw *me)
-{
-	kmem_cache_free(mw_cache, me);
-}
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
-	struct ib_mr *ib_mr;
-	int ret;
-	struct ehca_mr *e_maxmr;
-	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	struct ehca_shca *shca =
-		container_of(pd->device, struct ehca_shca, ib_device);
-
-	if (shca->maxmr) {
-		e_maxmr = ehca_mr_new();
-		if (!e_maxmr) {
-			ehca_err(&shca->ib_device, "out of memory");
-			ib_mr = ERR_PTR(-ENOMEM);
-			goto get_dma_mr_exit0;
-		}
-
-		ret = ehca_reg_maxmr(shca, e_maxmr,
-				     (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
-				     mr_access_flags, e_pd,
-				     &e_maxmr->ib.ib_mr.lkey,
-				     &e_maxmr->ib.ib_mr.rkey);
-		if (ret) {
-			ehca_mr_delete(e_maxmr);
-			ib_mr = ERR_PTR(ret);
-			goto get_dma_mr_exit0;
-		}
-		ib_mr = &e_maxmr->ib.ib_mr;
-	} else {
-		ehca_err(&shca->ib_device, "no internal max-MR exist!");
-		ib_mr = ERR_PTR(-EINVAL);
-		goto get_dma_mr_exit0;
-	}
-
-get_dma_mr_exit0:
-	if (IS_ERR(ib_mr))
-		ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
-			 PTR_ERR(ib_mr), pd, mr_access_flags);
-	return ib_mr;
-} /* end ehca_get_dma_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-			       u64 virt, int mr_access_flags,
-			       struct ib_udata *udata)
-{
-	struct ib_mr *ib_mr;
-	struct ehca_mr *e_mr;
-	struct ehca_shca *shca =
-		container_of(pd->device, struct ehca_shca, ib_device);
-	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	struct ehca_mr_pginfo pginfo;
-	int ret, page_shift;
-	u32 num_kpages;
-	u32 num_hwpages;
-	u64 hwpage_size;
-
-	if (!pd) {
-		ehca_gen_err("bad pd=%p", pd);
-		return ERR_PTR(-EFAULT);
-	}
-
-	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
-	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
-	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-		/*
-		 * Remote Write Access requires Local Write Access
-		 * Remote Atomic Access requires Local Write Access
-		 */
-		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-			 mr_access_flags);
-		ib_mr = ERR_PTR(-EINVAL);
-		goto reg_user_mr_exit0;
-	}
-
-	if (length == 0 || virt + length < virt) {
-		ehca_err(pd->device, "bad input values: length=%llx "
-			 "virt_base=%llx", length, virt);
-		ib_mr = ERR_PTR(-EINVAL);
-		goto reg_user_mr_exit0;
-	}
-
-	e_mr = ehca_mr_new();
-	if (!e_mr) {
-		ehca_err(pd->device, "out of memory");
-		ib_mr = ERR_PTR(-ENOMEM);
-		goto reg_user_mr_exit0;
-	}
-
-	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
-				 mr_access_flags, 0);
-	if (IS_ERR(e_mr->umem)) {
-		ib_mr = (void *)e_mr->umem;
-		goto reg_user_mr_exit1;
-	}
-
-	if (e_mr->umem->page_size != PAGE_SIZE) {
-		ehca_err(pd->device, "page size not supported, "
-			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
-		ib_mr = ERR_PTR(-EINVAL);
-		goto reg_user_mr_exit2;
-	}
-
-	/* determine number of MR pages */
-	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
-	/* select proper hw_pgsize */
-	page_shift = PAGE_SHIFT;
-	if (e_mr->umem->hugetlb) {
-		/* determine page_shift, clamp between 4K and 16M */
-		page_shift = (fls64(length - 1) + 3) & ~3;
-		page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
-				 EHCA_MR_PGSHIFT16M);
-	}
-	hwpage_size = 1UL << page_shift;
-
-	/* now that we have the desired page size, shift until it's
-	 * supported, too. 4K is always supported, so this terminates.
-	 */
-	while (!(hwpage_size & shca->hca_cap_mr_pgsize))
-		hwpage_size >>= 4;
-
-reg_user_mr_fallback:
-	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
-	/* register MR on HCA */
-	memset(&pginfo, 0, sizeof(pginfo));
-	pginfo.type = EHCA_MR_PGI_USER;
-	pginfo.hwpage_size = hwpage_size;
-	pginfo.num_kpages = num_kpages;
-	pginfo.num_hwpages = num_hwpages;
-	pginfo.u.usr.region = e_mr->umem;
-	pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
-	pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
-	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
-			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
-	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
-		ehca_warn(pd->device, "failed to register mr "
-			  "with hwpage_size=%llx", hwpage_size);
-		ehca_info(pd->device, "try to register mr with "
-			  "kpage_size=%lx", PAGE_SIZE);
-		/*
-		 * this means kpages are not contiguous for a hw page
-		 * try kernel page size as fallback solution
-		 */
-		hwpage_size = PAGE_SIZE;
-		goto reg_user_mr_fallback;
-	}
-	if (ret) {
-		ib_mr = ERR_PTR(ret);
-		goto reg_user_mr_exit2;
-	}
-
-	/* successful registration of all pages */
-	return &e_mr->ib.ib_mr;
-
-reg_user_mr_exit2:
-	ib_umem_release(e_mr->umem);
-reg_user_mr_exit1:
-	ehca_mr_delete(e_mr);
-reg_user_mr_exit0:
-	if (IS_ERR(ib_mr))
-		ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
-			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
-	return ib_mr;
-} /* end ehca_reg_user_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_mr(struct ib_mr *mr)
-{
-	int ret = 0;
-	u64 h_ret;
-	struct ehca_shca *shca =
-		container_of(mr->device, struct ehca_shca, ib_device);
-	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-
-	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
-		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
-			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
-		ret = -EINVAL;
-		goto dereg_mr_exit0;
-	} else if (e_mr == shca->maxmr) {
-		/* should be impossible, however reject to be sure */
-		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
-			 "shca->maxmr=%p mr->lkey=%x",
-			 mr, shca->maxmr, mr->lkey);
-		ret = -EINVAL;
-		goto dereg_mr_exit0;
-	}
-
-	/* TODO: BUSY: MR still has bound window(s) */
-	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
-			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
-			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
-			 e_mr->ipz_mr_handle.handle, mr->lkey);
-		ret = ehca2ib_return_code(h_ret);
-		goto dereg_mr_exit0;
-	}
-
-	if (e_mr->umem)
-		ib_umem_release(e_mr->umem);
-
-	/* successful deregistration */
-	ehca_mr_delete(e_mr);
-
-dereg_mr_exit0:
-	if (ret)
-		ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
-	return ret;
-} /* end ehca_dereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
-{
-	struct ib_mw *ib_mw;
-	u64 h_ret;
-	struct ehca_mw *e_mw;
-	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	struct ehca_shca *shca =
-		container_of(pd->device, struct ehca_shca, ib_device);
-	struct ehca_mw_hipzout_parms hipzout;
-
-	if (type != IB_MW_TYPE_1)
-		return ERR_PTR(-EINVAL);
-
-	e_mw = ehca_mw_new();
-	if (!e_mw) {
-		ib_mw = ERR_PTR(-ENOMEM);
-		goto alloc_mw_exit0;
-	}
-
-	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
-					 e_pd->fw_pd, &hipzout);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
-			 "shca=%p hca_hndl=%llx mw=%p",
-			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
-		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
-		goto alloc_mw_exit1;
-	}
-	/* successful MW allocation */
-	e_mw->ipz_mw_handle = hipzout.handle;
-	e_mw->ib_mw.rkey    = hipzout.rkey;
-	return &e_mw->ib_mw;
-
-alloc_mw_exit1:
-	ehca_mw_delete(e_mw);
-alloc_mw_exit0:
-	if (IS_ERR(ib_mw))
-		ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
-	return ib_mw;
-} /* end ehca_alloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_mw(struct ib_mw *mw)
-{
-	u64 h_ret;
-	struct ehca_shca *shca =
-		container_of(mw->device, struct ehca_shca, ib_device);
-	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
-
-	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
-			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
-			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
-			 e_mw->ipz_mw_handle.handle);
-		return ehca2ib_return_code(h_ret);
-	}
-	/* successful deallocation */
-	ehca_mw_delete(e_mw);
-	return 0;
-} /* end ehca_dealloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
-			      int mr_access_flags,
-			      struct ib_fmr_attr *fmr_attr)
-{
-	struct ib_fmr *ib_fmr;
-	struct ehca_shca *shca =
-		container_of(pd->device, struct ehca_shca, ib_device);
-	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	struct ehca_mr *e_fmr;
-	int ret;
-	u32 tmp_lkey, tmp_rkey;
-	struct ehca_mr_pginfo pginfo;
-	u64 hw_pgsize;
-
-	/* check other parameters */
-	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
-	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
-	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-		/*
-		 * Remote Write Access requires Local Write Access
-		 * Remote Atomic Access requires Local Write Access
-		 */
-		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-			 mr_access_flags);
-		ib_fmr = ERR_PTR(-EINVAL);
-		goto alloc_fmr_exit0;
-	}
-	if (mr_access_flags & IB_ACCESS_MW_BIND) {
-		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-			 mr_access_flags);
-		ib_fmr = ERR_PTR(-EINVAL);
-		goto alloc_fmr_exit0;
-	}
-	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
-		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
-			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
-			 fmr_attr->max_pages, fmr_attr->max_maps,
-			 fmr_attr->page_shift);
-		ib_fmr = ERR_PTR(-EINVAL);
-		goto alloc_fmr_exit0;
-	}
-
-	hw_pgsize = 1 << fmr_attr->page_shift;
-	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
-		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
-			 fmr_attr->page_shift);
-		ib_fmr = ERR_PTR(-EINVAL);
-		goto alloc_fmr_exit0;
-	}
-
-	e_fmr = ehca_mr_new();
-	if (!e_fmr) {
-		ib_fmr = ERR_PTR(-ENOMEM);
-		goto alloc_fmr_exit0;
-	}
-	e_fmr->flags |= EHCA_MR_FLAG_FMR;
-
-	/* register MR on HCA */
-	memset(&pginfo, 0, sizeof(pginfo));
-	pginfo.hwpage_size = hw_pgsize;
-	/*
-	 * pginfo.num_hwpages==0, ie register_rpages() will not be called
-	 * but deferred to map_phys_fmr()
-	 */
-	ret = ehca_reg_mr(shca, e_fmr, NULL,
-			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
-			  mr_access_flags, e_pd, &pginfo,
-			  &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
-	if (ret) {
-		ib_fmr = ERR_PTR(ret);
-		goto alloc_fmr_exit1;
-	}
-
-	/* successful */
-	e_fmr->hwpage_size = hw_pgsize;
-	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
-	e_fmr->fmr_max_pages = fmr_attr->max_pages;
-	e_fmr->fmr_max_maps = fmr_attr->max_maps;
-	e_fmr->fmr_map_cnt = 0;
-	return &e_fmr->ib.ib_fmr;
-
-alloc_fmr_exit1:
-	ehca_mr_delete(e_fmr);
-alloc_fmr_exit0:
-	return ib_fmr;
-} /* end ehca_alloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
-		      u64 *page_list,
-		      int list_len,
-		      u64 iova)
-{
-	int ret;
-	struct ehca_shca *shca =
-		container_of(fmr->device, struct ehca_shca, ib_device);
-	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
-	struct ehca_mr_pginfo pginfo;
-	u32 tmp_lkey, tmp_rkey;
-
-	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
-			 e_fmr, e_fmr->flags);
-		ret = -EINVAL;
-		goto map_phys_fmr_exit0;
-	}
-	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
-	if (ret)
-		goto map_phys_fmr_exit0;
-	if (iova % e_fmr->fmr_page_size) {
-		/* only whole-numbered pages */
-		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
-			 iova, e_fmr->fmr_page_size);
-		ret = -EINVAL;
-		goto map_phys_fmr_exit0;
-	}
-	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
-		/* HCAD does not limit the maps, however trace this anyway */
-		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
-			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
-			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
-	}
-
-	memset(&pginfo, 0, sizeof(pginfo));
-	pginfo.type = EHCA_MR_PGI_FMR;
-	pginfo.num_kpages = list_len;
-	pginfo.hwpage_size = e_fmr->hwpage_size;
-	pginfo.num_hwpages =
-		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
-	pginfo.u.fmr.page_list = page_list;
-	pginfo.next_hwpage =
-		(iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
-	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
-
-	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
-			    list_len * e_fmr->fmr_page_size,
-			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
-	if (ret)
-		goto map_phys_fmr_exit0;
-
-	/* successful reregistration */
-	e_fmr->fmr_map_cnt++;
-	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
-	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
-	return 0;
-
-map_phys_fmr_exit0:
-	if (ret)
-		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
-			 "iova=%llx", ret, fmr, page_list, list_len, iova);
-	return ret;
-} /* end ehca_map_phys_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_fmr(struct list_head *fmr_list)
-{
-	int ret = 0;
-	struct ib_fmr *ib_fmr;
-	struct ehca_shca *shca = NULL;
-	struct ehca_shca *prev_shca;
-	struct ehca_mr *e_fmr;
-	u32 num_fmr = 0;
-	u32 unmap_fmr_cnt = 0;
-
-	/* check all FMR belong to same SHCA, and check internal flag */
-	list_for_each_entry(ib_fmr, fmr_list, list) {
-		prev_shca = shca;
-		shca = container_of(ib_fmr->device, struct ehca_shca,
-				    ib_device);
-		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
-		if ((shca != prev_shca) && prev_shca) {
-			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
-				 "prev_shca=%p e_fmr=%p",
-				 shca, prev_shca, e_fmr);
-			ret = -EINVAL;
-			goto unmap_fmr_exit0;
-		}
-		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
-				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
-			ret = -EINVAL;
-			goto unmap_fmr_exit0;
-		}
-		num_fmr++;
-	}
-
-	/* loop over all FMRs to unmap */
-	list_for_each_entry(ib_fmr, fmr_list, list) {
-		unmap_fmr_cnt++;
-		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
-		shca = container_of(ib_fmr->device, struct ehca_shca,
-				    ib_device);
-		ret = ehca_unmap_one_fmr(shca, e_fmr);
-		if (ret) {
-			/* unmap failed, stop unmapping of rest of FMRs */
-			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
-				 "stop rest, e_fmr=%p num_fmr=%x "
-				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
-				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
-			goto unmap_fmr_exit0;
-		}
-	}
-
-unmap_fmr_exit0:
-	if (ret)
-		ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
-			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
-	return ret;
-} /* end ehca_unmap_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr)
-{
-	int ret;
-	u64 h_ret;
-	struct ehca_shca *shca =
-		container_of(fmr->device, struct ehca_shca, ib_device);
-	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-
-	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
-			 e_fmr, e_fmr->flags);
-		ret = -EINVAL;
-		goto free_fmr_exit0;
-	}
-
-	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
-			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
-			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
-			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
-		ret = ehca2ib_return_code(h_ret);
-		goto free_fmr_exit0;
-	}
-	/* successful deregistration */
-	ehca_mr_delete(e_fmr);
-	return 0;
-
-free_fmr_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
-	return ret;
-} /* end ehca_dealloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
-				   struct ehca_mr *e_mr,
-				   struct ehca_mr_pginfo *pginfo);
-
-int ehca_reg_mr(struct ehca_shca *shca,
-		struct ehca_mr *e_mr,
-		u64 *iova_start,
-		u64 size,
-		int acl,
-		struct ehca_pd *e_pd,
-		struct ehca_mr_pginfo *pginfo,
-		u32 *lkey, /*OUT*/
-		u32 *rkey, /*OUT*/
-		enum ehca_reg_type reg_type)
-{
-	int ret;
-	u64 h_ret;
-	u32 hipz_acl;
-	struct ehca_mr_hipzout_parms hipzout;
-
-	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-	if (ehca_use_hp_mr == 1)
-		hipz_acl |= 0x00000001;
-
-	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
-					 (u64)iova_start, size, hipz_acl,
-					 e_pd->fw_pd, &hipzout);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
-			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
-		ret = ehca2ib_return_code(h_ret);
-		goto ehca_reg_mr_exit0;
-	}
-
-	e_mr->ipz_mr_handle = hipzout.handle;
-
-	if (reg_type == EHCA_REG_BUSMAP_MR)
-		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
-	else if (reg_type == EHCA_REG_MR)
-		ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
-	else
-		ret = -EINVAL;
-
-	if (ret)
-		goto ehca_reg_mr_exit1;
-
-	/* successful registration */
-	e_mr->num_kpages = pginfo->num_kpages;
-	e_mr->num_hwpages = pginfo->num_hwpages;
-	e_mr->hwpage_size = pginfo->hwpage_size;
-	e_mr->start = iova_start;
-	e_mr->size = size;
-	e_mr->acl = acl;
-	*lkey = hipzout.lkey;
-	*rkey = hipzout.rkey;
-	return 0;
-
-ehca_reg_mr_exit1:
-	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
-			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
-			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
-			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
-			 hipzout.lkey, pginfo, pginfo->num_kpages,
-			 pginfo->num_hwpages, ret);
-		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
-			 "not recoverable");
-	}
-ehca_reg_mr_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
-			 "num_kpages=%llx num_hwpages=%llx",
-			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
-			 pginfo->num_kpages, pginfo->num_hwpages);
-	return ret;
-} /* end ehca_reg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
-		       struct ehca_mr *e_mr,
-		       struct ehca_mr_pginfo *pginfo)
-{
-	int ret = 0;
-	u64 h_ret;
-	u32 rnum;
-	u64 rpage;
-	u32 i;
-	u64 *kpage;
-
-	if (!pginfo->num_hwpages) /* in case of fmr */
-		return 0;
-
-	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!kpage) {
-		ehca_err(&shca->ib_device, "kpage alloc failed");
-		ret = -ENOMEM;
-		goto ehca_reg_mr_rpages_exit0;
-	}
-
-	/* max MAX_RPAGES ehca mr pages per register call */
-	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
-
-		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
-			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
-			if (rnum == 0)
-				rnum = MAX_RPAGES;      /* last shot is full */
-		} else
-			rnum = MAX_RPAGES;
-
-		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
-		if (ret) {
-			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
-				 "bad rc, ret=%i rnum=%x kpage=%p",
-				 ret, rnum, kpage);
-			goto ehca_reg_mr_rpages_exit1;
-		}
-
-		if (rnum > 1) {
-			rpage = __pa(kpage);
-			if (!rpage) {
-				ehca_err(&shca->ib_device, "kpage=%p i=%x",
-					 kpage, i);
-				ret = -EFAULT;
-				goto ehca_reg_mr_rpages_exit1;
-			}
-		} else
-			rpage = *kpage;
-
-		h_ret = hipz_h_register_rpage_mr(
-			shca->ipz_hca_handle, e_mr,
-			ehca_encode_hwpage_size(pginfo->hwpage_size),
-			0, rpage, rnum);
-
-		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
-			/*
-			 * check for 'registration complete'==H_SUCCESS
-			 * and for 'page registered'==H_PAGE_REGISTERED
-			 */
-			if (h_ret != H_SUCCESS) {
-				ehca_err(&shca->ib_device, "last "
-					 "hipz_reg_rpage_mr failed, h_ret=%lli "
-					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
-					 " lkey=%x", h_ret, e_mr, i,
-					 shca->ipz_hca_handle.handle,
-					 e_mr->ipz_mr_handle.handle,
-					 e_mr->ib.ib_mr.lkey);
-				ret = ehca2ib_return_code(h_ret);
-				break;
-			} else
-				ret = 0;
-		} else if (h_ret != H_PAGE_REGISTERED) {
-			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
-				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
-				 "mr_hndl=%llx", h_ret, e_mr, i,
-				 e_mr->ib.ib_mr.lkey,
-				 shca->ipz_hca_handle.handle,
-				 e_mr->ipz_mr_handle.handle);
-			ret = ehca2ib_return_code(h_ret);
-			break;
-		} else
-			ret = 0;
-	} /* end for(i) */
-
-
-ehca_reg_mr_rpages_exit1:
-	ehca_free_fw_ctrlblock(kpage);
-ehca_reg_mr_rpages_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
-			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
-			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
-	return ret;
-} /* end ehca_reg_mr_rpages() */
-
-/*----------------------------------------------------------------------*/
-
-inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
-				struct ehca_mr *e_mr,
-				u64 *iova_start,
-				u64 size,
-				u32 acl,
-				struct ehca_pd *e_pd,
-				struct ehca_mr_pginfo *pginfo,
-				u32 *lkey, /*OUT*/
-				u32 *rkey) /*OUT*/
-{
-	int ret;
-	u64 h_ret;
-	u32 hipz_acl;
-	u64 *kpage;
-	u64 rpage;
-	struct ehca_mr_pginfo pginfo_save;
-	struct ehca_mr_hipzout_parms hipzout;
-
-	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-
-	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!kpage) {
-		ehca_err(&shca->ib_device, "kpage alloc failed");
-		ret = -ENOMEM;
-		goto ehca_rereg_mr_rereg1_exit0;
-	}
-
-	pginfo_save = *pginfo;
-	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
-	if (ret) {
-		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
-			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
-			 "kpage=%p", e_mr, pginfo, pginfo->type,
-			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
-		goto ehca_rereg_mr_rereg1_exit1;
-	}
-	rpage = __pa(kpage);
-	if (!rpage) {
-		ehca_err(&shca->ib_device, "kpage=%p", kpage);
-		ret = -EFAULT;
-		goto ehca_rereg_mr_rereg1_exit1;
-	}
-	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
-				      (u64)iova_start, size, hipz_acl,
-				      e_pd->fw_pd, rpage, &hipzout);
-	if (h_ret != H_SUCCESS) {
-		/*
-		 * reregistration unsuccessful, try it again with the 3 hCalls,
-		 * e.g. this is required in case H_MR_CONDITION
-		 * (MW bound or MR is shared)
-		 */
-		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
-			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
-		*pginfo = pginfo_save;
-		ret = -EAGAIN;
-	} else if ((u64 *)hipzout.vaddr != iova_start) {
-		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
-			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
-			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
-			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
-			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
-		ret = -EFAULT;
-	} else {
-		/*
-		 * successful reregistration
-		 * note: start and start_out are identical for eServer HCAs
-		 */
-		e_mr->num_kpages = pginfo->num_kpages;
-		e_mr->num_hwpages = pginfo->num_hwpages;
-		e_mr->hwpage_size = pginfo->hwpage_size;
-		e_mr->start = iova_start;
-		e_mr->size = size;
-		e_mr->acl = acl;
-		*lkey = hipzout.lkey;
-		*rkey = hipzout.rkey;
-	}
-
-ehca_rereg_mr_rereg1_exit1:
-	ehca_free_fw_ctrlblock(kpage);
-ehca_rereg_mr_rereg1_exit0:
-	if (ret && (ret != -EAGAIN))
-		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
-			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
-			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
-			 pginfo->num_hwpages);
-	return ret;
-} /* end ehca_rereg_mr_rereg1() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_rereg_mr(struct ehca_shca *shca,
-		  struct ehca_mr *e_mr,
-		  u64 *iova_start,
-		  u64 size,
-		  int acl,
-		  struct ehca_pd *e_pd,
-		  struct ehca_mr_pginfo *pginfo,
-		  u32 *lkey,
-		  u32 *rkey)
-{
-	int ret = 0;
-	u64 h_ret;
-	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
-	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
-
-	/* first determine reregistration hCall(s) */
-	if ((pginfo->num_hwpages > MAX_RPAGES) ||
-	    (e_mr->num_hwpages > MAX_RPAGES) ||
-	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
-		ehca_dbg(&shca->ib_device, "Rereg3 case, "
-			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
-			 pginfo->num_hwpages, e_mr->num_hwpages);
-		rereg_1_hcall = 0;
-		rereg_3_hcall = 1;
-	}
-
-	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
-		rereg_1_hcall = 0;
-		rereg_3_hcall = 1;
-		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
-		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
-			 e_mr);
-	}
-
-	if (rereg_1_hcall) {
-		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
-					   acl, e_pd, pginfo, lkey, rkey);
-		if (ret) {
-			if (ret == -EAGAIN)
-				rereg_3_hcall = 1;
-			else
-				goto ehca_rereg_mr_exit0;
-		}
-	}
-
-	if (rereg_3_hcall) {
-		struct ehca_mr save_mr;
-
-		/* first deregister old MR */
-		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-		if (h_ret != H_SUCCESS) {
-			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
-				 "mr->lkey=%x",
-				 h_ret, e_mr, shca->ipz_hca_handle.handle,
-				 e_mr->ipz_mr_handle.handle,
-				 e_mr->ib.ib_mr.lkey);
-			ret = ehca2ib_return_code(h_ret);
-			goto ehca_rereg_mr_exit0;
-		}
-		/* clean ehca_mr_t, without changing struct ib_mr and lock */
-		save_mr = *e_mr;
-		ehca_mr_deletenew(e_mr);
-
-		/* set some MR values */
-		e_mr->flags = save_mr.flags;
-		e_mr->hwpage_size = save_mr.hwpage_size;
-		e_mr->fmr_page_size = save_mr.fmr_page_size;
-		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
-		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
-		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
-
-		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
-				  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
-		if (ret) {
-			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
-			memcpy(&e_mr->flags, &(save_mr.flags),
-			       sizeof(struct ehca_mr) - offset);
-			goto ehca_rereg_mr_exit0;
-		}
-	}
-
-ehca_rereg_mr_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
-			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
-			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
-			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
-			 rereg_1_hcall, rereg_3_hcall);
-	return ret;
-} /* end ehca_rereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
-		       struct ehca_mr *e_fmr)
-{
-	int ret = 0;
-	u64 h_ret;
-	struct ehca_pd *e_pd =
-		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
-	struct ehca_mr save_fmr;
-	u32 tmp_lkey, tmp_rkey;
-	struct ehca_mr_pginfo pginfo;
-	struct ehca_mr_hipzout_parms hipzout;
-	struct ehca_mr save_mr;
-
-	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
-		/*
-		 * note: after using rereg hcall with len=0,
-		 * rereg hcall must be used again for registering pages
-		 */
-		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
-					      0, 0, e_pd->fw_pd, 0, &hipzout);
-		if (h_ret == H_SUCCESS) {
-			/* successful reregistration */
-			e_fmr->start = NULL;
-			e_fmr->size = 0;
-			tmp_lkey = hipzout.lkey;
-			tmp_rkey = hipzout.rkey;
-			return 0;
-		}
-		/*
-		 * should not happen, because length checked above,
-		 * FMRs are not shared and no MW bound to FMRs
-		 */
-		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
-			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
-			 "mr_hndl=%llx lkey=%x lkey_out=%x",
-			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
-			 e_fmr->ipz_mr_handle.handle,
-			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
-		/* try free and rereg */
-	}
-
-	/* first free old FMR */
-	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
-			 "lkey=%x",
-			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
-			 e_fmr->ipz_mr_handle.handle,
-			 e_fmr->ib.ib_fmr.lkey);
-		ret = ehca2ib_return_code(h_ret);
-		goto ehca_unmap_one_fmr_exit0;
-	}
-	/* clean ehca_mr_t, without changing lock */
-	save_fmr = *e_fmr;
-	ehca_mr_deletenew(e_fmr);
-
-	/* set some MR values */
-	e_fmr->flags = save_fmr.flags;
-	e_fmr->hwpage_size = save_fmr.hwpage_size;
-	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
-	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
-	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
-	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
-	e_fmr->acl = save_fmr.acl;
-
-	memset(&pginfo, 0, sizeof(pginfo));
-	pginfo.type = EHCA_MR_PGI_FMR;
-	ret = ehca_reg_mr(shca, e_fmr, NULL,
-			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
-			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
-			  &tmp_rkey, EHCA_REG_MR);
-	if (ret) {
-		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
-		memcpy(&e_fmr->flags, &(save_mr.flags),
-		       sizeof(struct ehca_mr) - offset);
-	}
-
-ehca_unmap_one_fmr_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
-			 "fmr_max_pages=%x",
-			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
-	return ret;
-} /* end ehca_unmap_one_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_smr(struct ehca_shca *shca,
-		 struct ehca_mr *e_origmr,
-		 struct ehca_mr *e_newmr,
-		 u64 *iova_start,
-		 int acl,
-		 struct ehca_pd *e_pd,
-		 u32 *lkey, /*OUT*/
-		 u32 *rkey) /*OUT*/
-{
-	int ret = 0;
-	u64 h_ret;
-	u32 hipz_acl;
-	struct ehca_mr_hipzout_parms hipzout;
-
-	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
-	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
-				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
-				    &hipzout);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
-			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
-			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
-			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
-			 shca->ipz_hca_handle.handle,
-			 e_origmr->ipz_mr_handle.handle,
-			 e_origmr->ib.ib_mr.lkey);
-		ret = ehca2ib_return_code(h_ret);
-		goto ehca_reg_smr_exit0;
-	}
-	/* successful registration */
-	e_newmr->num_kpages = e_origmr->num_kpages;
-	e_newmr->num_hwpages = e_origmr->num_hwpages;
-	e_newmr->hwpage_size   = e_origmr->hwpage_size;
-	e_newmr->start = iova_start;
-	e_newmr->size = e_origmr->size;
-	e_newmr->acl = acl;
-	e_newmr->ipz_mr_handle = hipzout.handle;
-	*lkey = hipzout.lkey;
-	*rkey = hipzout.rkey;
-	return 0;
-
-ehca_reg_smr_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
-			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
-			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-	return ret;
-} /* end ehca_reg_smr() */
-
-/*----------------------------------------------------------------------*/
-static inline void *ehca_calc_sectbase(int top, int dir, int idx)
-{
-	unsigned long ret = idx;
-	ret |= dir << EHCA_DIR_INDEX_SHIFT;
-	ret |= top << EHCA_TOP_INDEX_SHIFT;
-	return __va(ret << SECTION_SIZE_BITS);
-}
-
-#define ehca_bmap_valid(entry) \
-	((u64)entry != (u64)EHCA_INVAL_ADDR)
-
-static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
-			       struct ehca_shca *shca, struct ehca_mr *mr,
-			       struct ehca_mr_pginfo *pginfo)
-{
-	u64 h_ret = 0;
-	unsigned long page = 0;
-	u64 rpage = __pa(kpage);
-	int page_count;
-
-	void *sectbase = ehca_calc_sectbase(top, dir, idx);
-	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
-		ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
-					   "hwpage_size does not match the "
-					   "section start address");
-	}
-	page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
-
-	while (page < page_count) {
-		u64 rnum;
-		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
-		     rnum++) {
-			void *pg = sectbase + ((page++) * pginfo->hwpage_size);
-			kpage[rnum] = __pa(pg);
-		}
-
-		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
-			ehca_encode_hwpage_size(pginfo->hwpage_size),
-			0, rpage, rnum);
-
-		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
-			ehca_err(&shca->ib_device, "register_rpage_mr failed");
-			return h_ret;
-		}
-	}
-	return h_ret;
-}
-
-static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
-				struct ehca_shca *shca, struct ehca_mr *mr,
-				struct ehca_mr_pginfo *pginfo)
-{
-	u64 hret = H_SUCCESS;
-	int idx;
-
-	for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
-		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
-			continue;
-
-		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
-					   pginfo);
-		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
-				return hret;
-	}
-	return hret;
-}
-
-static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
-				    struct ehca_mr *mr,
-				    struct ehca_mr_pginfo *pginfo)
-{
-	u64 hret = H_SUCCESS;
-	int dir;
-
-	for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
-		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-			continue;
-
-		hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
-		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
-				return hret;
-	}
-	return hret;
-}
-
-/* register internal max-MR to internal SHCA */
-int ehca_reg_internal_maxmr(
-	struct ehca_shca *shca,
-	struct ehca_pd *e_pd,
-	struct ehca_mr **e_maxmr)  /*OUT*/
-{
-	int ret;
-	struct ehca_mr *e_mr;
-	u64 *iova_start;
-	u64 size_maxmr;
-	struct ehca_mr_pginfo pginfo;
-	u32 num_kpages;
-	u32 num_hwpages;
-	u64 hw_pgsize;
-
-	if (!ehca_bmap) {
-		ret = -EFAULT;
-		goto ehca_reg_internal_maxmr_exit0;
-	}
-
-	e_mr = ehca_mr_new();
-	if (!e_mr) {
-		ehca_err(&shca->ib_device, "out of memory");
-		ret = -ENOMEM;
-		goto ehca_reg_internal_maxmr_exit0;
-	}
-	e_mr->flags |= EHCA_MR_FLAG_MAXMR;
-
-	/* register internal max-MR on HCA */
-	size_maxmr = ehca_mr_len;
-	iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
-	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
-				PAGE_SIZE);
-	hw_pgsize = ehca_get_max_hwpage_size(shca);
-	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
-				 hw_pgsize);
-
-	memset(&pginfo, 0, sizeof(pginfo));
-	pginfo.type = EHCA_MR_PGI_PHYS;
-	pginfo.num_kpages = num_kpages;
-	pginfo.num_hwpages = num_hwpages;
-	pginfo.hwpage_size = hw_pgsize;
-	pginfo.u.phy.addr = 0;
-	pginfo.u.phy.size = size_maxmr;
-
-	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
-			  &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
-	if (ret) {
-		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
-			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
-			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
-			 num_kpages, num_hwpages);
-		goto ehca_reg_internal_maxmr_exit1;
-	}
-
-	/* successful registration of all pages */
-	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
-	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
-	e_mr->ib.ib_mr.uobject = NULL;
-	atomic_inc(&(e_pd->ib_pd.usecnt));
-	*e_maxmr = e_mr;
-	return 0;
-
-ehca_reg_internal_maxmr_exit1:
-	ehca_mr_delete(e_mr);
-ehca_reg_internal_maxmr_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
-			 ret, shca, e_pd, e_maxmr);
-	return ret;
-} /* end ehca_reg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
-		   struct ehca_mr *e_newmr,
-		   u64 *iova_start,
-		   int acl,
-		   struct ehca_pd *e_pd,
-		   u32 *lkey,
-		   u32 *rkey)
-{
-	u64 h_ret;
-	struct ehca_mr *e_origmr = shca->maxmr;
-	u32 hipz_acl;
-	struct ehca_mr_hipzout_parms hipzout;
-
-	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
-	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
-				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
-				    &hipzout);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
-			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
-			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
-			 e_origmr->ipz_mr_handle.handle,
-			 e_origmr->ib.ib_mr.lkey);
-		return ehca2ib_return_code(h_ret);
-	}
-	/* successful registration */
-	e_newmr->num_kpages = e_origmr->num_kpages;
-	e_newmr->num_hwpages = e_origmr->num_hwpages;
-	e_newmr->hwpage_size = e_origmr->hwpage_size;
-	e_newmr->start = iova_start;
-	e_newmr->size = e_origmr->size;
-	e_newmr->acl = acl;
-	e_newmr->ipz_mr_handle = hipzout.handle;
-	*lkey = hipzout.lkey;
-	*rkey = hipzout.rkey;
-	return 0;
-} /* end ehca_reg_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
-{
-	int ret;
-	struct ehca_mr *e_maxmr;
-	struct ib_pd *ib_pd;
-
-	if (!shca->maxmr) {
-		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
-		ret = -EINVAL;
-		goto ehca_dereg_internal_maxmr_exit0;
-	}
-
-	e_maxmr = shca->maxmr;
-	ib_pd = e_maxmr->ib.ib_mr.pd;
-	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
-
-	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
-	if (ret) {
-		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
-			 "ret=%i e_maxmr=%p shca=%p lkey=%x",
-			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
-		shca->maxmr = e_maxmr;
-		goto ehca_dereg_internal_maxmr_exit0;
-	}
-
-	atomic_dec(&ib_pd->usecnt);
-
-ehca_dereg_internal_maxmr_exit0:
-	if (ret)
-		ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
-			 ret, shca, shca->maxmr);
-	return ret;
-} /* end ehca_dereg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* check the page list of the map FMR verb for validity */
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
-			     u64 *page_list,
-			     int list_len)
-{
-	u32 i;
-	u64 *page;
-
-	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
-		ehca_gen_err("bad list_len, list_len=%x "
-			     "e_fmr->fmr_max_pages=%x fmr=%p",
-			     list_len, e_fmr->fmr_max_pages, e_fmr);
-		return -EINVAL;
-	}
-
-	/* each page must be aligned */
-	page = page_list;
-	for (i = 0; i < list_len; i++) {
-		if (*page % e_fmr->fmr_page_size) {
-			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
-				     "fmr_page_size=%x", i, *page, page, e_fmr,
-				     e_fmr->fmr_page_size);
-			return -EINVAL;
-		}
-		page++;
-	}
-
-	return 0;
-} /* end ehca_fmr_check_page_list() */
-
-/*----------------------------------------------------------------------*/
-
-/* PAGE_SIZE >= pginfo->hwpage_size */
-static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
-				  u32 number,
-				  u64 *kpage)
-{
-	int ret = 0;
-	u64 pgaddr;
-	u32 j = 0;
-	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
-	struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
-	while (*sg != NULL) {
-		pgaddr = page_to_pfn(sg_page(*sg))
-			<< PAGE_SHIFT;
-		*kpage = pgaddr + (pginfo->next_hwpage *
-				   pginfo->hwpage_size);
-		if (!(*kpage)) {
-			ehca_gen_err("pgaddr=%llx "
-				     "sg_dma_address=%llx "
-				     "entry=%llx next_hwpage=%llx",
-				     pgaddr, (u64)sg_dma_address(*sg),
-				     pginfo->u.usr.next_nmap,
-				     pginfo->next_hwpage);
-			return -EFAULT;
-		}
-		(pginfo->hwpage_cnt)++;
-		(pginfo->next_hwpage)++;
-		kpage++;
-		if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
-			(pginfo->kpage_cnt)++;
-			(pginfo->u.usr.next_nmap)++;
-			pginfo->next_hwpage = 0;
-			*sg = sg_next(*sg);
-		}
-		j++;
-		if (j >= number)
-			break;
-	}
-
-	return ret;
-}
-
-/*
- * check the given pages for a contiguous layout;
- * the last page address is returned in prev_pgaddr for further checks
- */
-static int ehca_check_kpages_per_ate(struct scatterlist **sg,
-				     int num_pages,
-				     u64 *prev_pgaddr)
-{
-	for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
-		u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
-		if (ehca_debug_level >= 3)
-			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
-				     *(u64 *)__va(pgaddr));
-		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
-			ehca_gen_err("uncontiguous page found pgaddr=%llx "
-				     "prev_pgaddr=%llx entries_left_in_hwpage=%x",
-				     pgaddr, *prev_pgaddr, num_pages);
-			return -EINVAL;
-		}
-		*prev_pgaddr = pgaddr;
-	}
-	return 0;
-}
-
-/* PAGE_SIZE < pginfo->hwpage_size */
-static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
-				  u32 number,
-				  u64 *kpage)
-{
-	int ret = 0;
-	u64 pgaddr, prev_pgaddr;
-	u32 j = 0;
-	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
-	int nr_kpages = kpages_per_hwpage;
-	struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
-	while (*sg != NULL) {
-
-		if (nr_kpages == kpages_per_hwpage) {
-			pgaddr = (page_to_pfn(sg_page(*sg))
-				   << PAGE_SHIFT);
-			*kpage = pgaddr;
-			if (!(*kpage)) {
-				ehca_gen_err("pgaddr=%llx entry=%llx",
-					     pgaddr, pginfo->u.usr.next_nmap);
-				ret = -EFAULT;
-				return ret;
-			}
-			/*
-			 * The first page in a hwpage must be aligned;
-			 * the first MR page is exempt from this rule.
-			 */
-			if (pgaddr & (pginfo->hwpage_size - 1)) {
-				if (pginfo->hwpage_cnt) {
-					ehca_gen_err(
-						"invalid alignment "
-						"pgaddr=%llx entry=%llx "
-						"mr_pgsize=%llx",
-						pgaddr, pginfo->u.usr.next_nmap,
-						pginfo->hwpage_size);
-					ret = -EFAULT;
-					return ret;
-				}
-				/* first MR page */
-				pginfo->kpage_cnt =
-					(pgaddr &
-					 (pginfo->hwpage_size - 1)) >>
-					PAGE_SHIFT;
-				nr_kpages -= pginfo->kpage_cnt;
-				*kpage = pgaddr &
-					 ~(pginfo->hwpage_size - 1);
-			}
-			if (ehca_debug_level >= 3) {
-				u64 val = *(u64 *)__va(pgaddr);
-				ehca_gen_dbg("kpage=%llx page=%llx "
-					     "value=%016llx",
-					     *kpage, pgaddr, val);
-			}
-			prev_pgaddr = pgaddr;
-			*sg = sg_next(*sg);
-			pginfo->kpage_cnt++;
-			pginfo->u.usr.next_nmap++;
-			nr_kpages--;
-			if (!nr_kpages)
-				goto next_kpage;
-			continue;
-		}
-
-		ret = ehca_check_kpages_per_ate(sg, nr_kpages,
-						&prev_pgaddr);
-		if (ret)
-			return ret;
-		pginfo->kpage_cnt += nr_kpages;
-		pginfo->u.usr.next_nmap += nr_kpages;
-
-next_kpage:
-		nr_kpages = kpages_per_hwpage;
-		(pginfo->hwpage_cnt)++;
-		kpage++;
-		j++;
-		if (j >= number)
-			break;
-	}
-
-	return ret;
-}
-
-static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
-				 u32 number, u64 *kpage)
-{
-	int ret = 0;
-	u64 addr = pginfo->u.phy.addr;
-	u64 size = pginfo->u.phy.size;
-	u64 num_hw, offs_hw;
-	u32 i = 0;
-
-	num_hw  = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
-				pginfo->hwpage_size);
-	offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
-
-	while (pginfo->next_hwpage < offs_hw + num_hw) {
-		/* sanity check */
-		if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
-		    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
-			ehca_gen_err("kpage_cnt >= num_kpages, "
-				     "kpage_cnt=%llx num_kpages=%llx "
-				     "hwpage_cnt=%llx "
-				     "num_hwpages=%llx i=%x",
-				     pginfo->kpage_cnt,
-				     pginfo->num_kpages,
-				     pginfo->hwpage_cnt,
-				     pginfo->num_hwpages, i);
-			return -EFAULT;
-		}
-		*kpage = (addr & ~(pginfo->hwpage_size - 1)) +
-			 (pginfo->next_hwpage * pginfo->hwpage_size);
-		if ( !(*kpage) && addr ) {
-			ehca_gen_err("addr=%llx size=%llx "
-				     "next_hwpage=%llx", addr,
-				     size, pginfo->next_hwpage);
-			return -EFAULT;
-		}
-		(pginfo->hwpage_cnt)++;
-		(pginfo->next_hwpage)++;
-		if (PAGE_SIZE >= pginfo->hwpage_size) {
-			if (pginfo->next_hwpage %
-			    (PAGE_SIZE / pginfo->hwpage_size) == 0)
-				(pginfo->kpage_cnt)++;
-		} else
-			pginfo->kpage_cnt += pginfo->hwpage_size /
-				PAGE_SIZE;
-		kpage++;
-		i++;
-		if (i >= number) break;
-	}
-	if (pginfo->next_hwpage >= offs_hw + num_hw) {
-		pginfo->next_hwpage = 0;
-	}
-
-	return ret;
-}
-
-static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
-				u32 number, u64 *kpage)
-{
-	int ret = 0;
-	u64 *fmrlist;
-	u32 i;
-
-	/* loop over desired page_list entries */
-	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
-	for (i = 0; i < number; i++) {
-		*kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
-			   pginfo->next_hwpage * pginfo->hwpage_size;
-		if ( !(*kpage) ) {
-			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
-				     "next_listelem=%llx next_hwpage=%llx",
-				     *fmrlist, fmrlist,
-				     pginfo->u.fmr.next_listelem,
-				     pginfo->next_hwpage);
-			return -EFAULT;
-		}
-		(pginfo->hwpage_cnt)++;
-		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
-			if (pginfo->next_hwpage %
-			    (pginfo->u.fmr.fmr_pgsize /
-			     pginfo->hwpage_size) == 0) {
-				(pginfo->kpage_cnt)++;
-				(pginfo->u.fmr.next_listelem)++;
-				fmrlist++;
-				pginfo->next_hwpage = 0;
-			} else
-				(pginfo->next_hwpage)++;
-		} else {
-			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
-				pginfo->u.fmr.fmr_pgsize;
-			unsigned int j;
-			u64 prev = *kpage;
-			/* check if adrs are contiguous */
-			for (j = 1; j < cnt_per_hwpage; j++) {
-				u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
-				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
-					ehca_gen_err("uncontiguous fmr pages "
-						     "found prev=%llx p=%llx "
-						     "idx=%x", prev, p, i + j);
-					return -EINVAL;
-				}
-				prev = p;
-			}
-			pginfo->kpage_cnt += cnt_per_hwpage;
-			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
-			fmrlist += cnt_per_hwpage;
-		}
-		kpage++;
-	}
-	return ret;
-}
-
-/* setup page buffer from page info */
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
-		     u32 number,
-		     u64 *kpage)
-{
-	int ret;
-
-	switch (pginfo->type) {
-	case EHCA_MR_PGI_PHYS:
-		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
-		break;
-	case EHCA_MR_PGI_USER:
-		ret = PAGE_SIZE >= pginfo->hwpage_size ?
-			ehca_set_pagebuf_user1(pginfo, number, kpage) :
-			ehca_set_pagebuf_user2(pginfo, number, kpage);
-		break;
-	case EHCA_MR_PGI_FMR:
-		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
-		break;
-	default:
-		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
-		ret = -EFAULT;
-		break;
-	}
-	return ret;
-} /* end ehca_set_pagebuf() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * check whether the MR is a max-MR, i.e. covers the whole memory;
- * returns 1 if it is a max-MR, else 0
- */
-int ehca_mr_is_maxmr(u64 size,
-		     u64 *iova_start)
-{
-	/* an MR is treated as max-MR only if it meets the following conditions: */
-	if ((size == ehca_mr_len) &&
-	    (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
-		ehca_gen_dbg("this is a max-MR");
-		return 1;
-	} else
-		return 0;
-} /* end ehca_mr_is_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* map access control for MR/MW. This routine is used for MR and MW. */
-void ehca_mrmw_map_acl(int ib_acl,
-		       u32 *hipz_acl)
-{
-	*hipz_acl = 0;
-	if (ib_acl & IB_ACCESS_REMOTE_READ)
-		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
-	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
-		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
-	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
-		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
-	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
-		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
-	if (ib_acl & IB_ACCESS_MW_BIND)
-		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
-} /* end ehca_mrmw_map_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/* sets page size in hipz access control for MR/MW. */
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
-{
-	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
-} /* end ehca_mrmw_set_pgsize_hipz_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * reverse map access control for MR/MW.
- * This routine is used for MR and MW.
- */
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
-			       int *ib_acl) /*OUT*/
-{
-	*ib_acl = 0;
-	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
-		*ib_acl |= IB_ACCESS_REMOTE_READ;
-	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
-		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
-	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
-		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
-	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
-		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
-	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
-		*ib_acl |= IB_ACCESS_MW_BIND;
-} /* end ehca_mrmw_reverse_map_acl() */
-
-
-/*----------------------------------------------------------------------*/
-
-/*
- * MR destructor and constructor
- * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
- * except struct ib_mr and spinlock
- */
-void ehca_mr_deletenew(struct ehca_mr *mr)
-{
-	mr->flags = 0;
-	mr->num_kpages = 0;
-	mr->num_hwpages = 0;
-	mr->acl = 0;
-	mr->start = NULL;
-	mr->fmr_page_size = 0;
-	mr->fmr_max_pages = 0;
-	mr->fmr_max_maps = 0;
-	mr->fmr_map_cnt = 0;
-	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
-	memset(&mr->galpas, 0, sizeof(mr->galpas));
-} /* end ehca_mr_deletenew() */
-
-int ehca_init_mrmw_cache(void)
-{
-	mr_cache = kmem_cache_create("ehca_cache_mr",
-				     sizeof(struct ehca_mr), 0,
-				     SLAB_HWCACHE_ALIGN,
-				     NULL);
-	if (!mr_cache)
-		return -ENOMEM;
-	mw_cache = kmem_cache_create("ehca_cache_mw",
-				     sizeof(struct ehca_mw), 0,
-				     SLAB_HWCACHE_ALIGN,
-				     NULL);
-	if (!mw_cache) {
-		kmem_cache_destroy(mr_cache);
-		mr_cache = NULL;
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void ehca_cleanup_mrmw_cache(void)
-{
-	kmem_cache_destroy(mr_cache);
-	kmem_cache_destroy(mw_cache);
-}
-
-static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
-				     int dir)
-{
-	if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
-		ehca_top_bmap->dir[dir] =
-			kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
-		if (!ehca_top_bmap->dir[dir])
-			return -ENOMEM;
-		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
-		memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
-	}
-	return 0;
-}
-
-static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
-{
-	if (!ehca_bmap_valid(ehca_bmap->top[top])) {
-		ehca_bmap->top[top] =
-			kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
-		if (!ehca_bmap->top[top])
-			return -ENOMEM;
-		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
-		memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
-	}
-	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
-}
-
-static inline int ehca_calc_index(unsigned long i, unsigned long s)
-{
-	return (i >> s) & EHCA_INDEX_MASK;
-}
-
-void ehca_destroy_busmap(void)
-{
-	int top, dir;
-
-	if (!ehca_bmap)
-		return;
-
-	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
-		if (!ehca_bmap_valid(ehca_bmap->top[top]))
-			continue;
-		for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
-			if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-				continue;
-
-			kfree(ehca_bmap->top[top]->dir[dir]);
-		}
-
-		kfree(ehca_bmap->top[top]);
-	}
-
-	kfree(ehca_bmap);
-	ehca_bmap = NULL;
-}
-
-static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
-{
-	unsigned long i, start_section, end_section;
-	int top, dir, idx;
-
-	if (!nr_pages)
-		return 0;
-
-	if (!ehca_bmap) {
-		ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
-		if (!ehca_bmap)
-			return -ENOMEM;
-		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
-		memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
-	}
-
-	start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
-	end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
-	for (i = start_section; i < end_section; i++) {
-		int ret;
-		top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
-		dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
-		idx = i & EHCA_INDEX_MASK;
-
-		ret = ehca_init_bmap(ehca_bmap, top, dir);
-		if (ret) {
-			ehca_destroy_busmap();
-			return ret;
-		}
-		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
-		ehca_mr_len += EHCA_SECTSIZE;
-	}
-	return 0;
-}
-
-static int ehca_is_hugepage(unsigned long pfn)
-{
-	int page_order;
-
-	if (pfn & EHCA_HUGEPAGE_PFN_MASK)
-		return 0;
-
-	page_order = compound_order(pfn_to_page(pfn));
-	if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
-		return 0;
-
-	return 1;
-}
-
-static int ehca_create_busmap_callback(unsigned long initial_pfn,
-				       unsigned long total_nr_pages, void *arg)
-{
-	int ret;
-	unsigned long pfn, start_pfn, end_pfn, nr_pages;
-
-	if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
-		return ehca_update_busmap(initial_pfn, total_nr_pages);
-
-	/* Given chunk is >= 16GB -> check for hugepages */
-	start_pfn = initial_pfn;
-	end_pfn = initial_pfn + total_nr_pages;
-	pfn = start_pfn;
-
-	while (pfn < end_pfn) {
-		if (ehca_is_hugepage(pfn)) {
-			/* Add mem found in front of the hugepage */
-			nr_pages = pfn - start_pfn;
-			ret = ehca_update_busmap(start_pfn, nr_pages);
-			if (ret)
-				return ret;
-			/* Skip the hugepage */
-			pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
-			start_pfn = pfn;
-		} else
-			pfn += (EHCA_SECTSIZE / PAGE_SIZE);
-	}
-
-	/* Add mem found behind the hugepage(s)  */
-	nr_pages = pfn - start_pfn;
-	return ehca_update_busmap(start_pfn, nr_pages);
-}
-
-int ehca_create_busmap(void)
-{
-	int ret;
-
-	ehca_mr_len = 0;
-	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
-				   ehca_create_busmap_callback);
-	return ret;
-}
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
-				   struct ehca_mr *e_mr,
-				   struct ehca_mr_pginfo *pginfo)
-{
-	int top;
-	u64 hret, *kpage;
-
-	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!kpage) {
-		ehca_err(&shca->ib_device, "kpage alloc failed");
-		return -ENOMEM;
-	}
-	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
-		if (!ehca_bmap_valid(ehca_bmap->top[top]))
-			continue;
-		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
-		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
-			break;
-	}
-
-	ehca_free_fw_ctrlblock(kpage);
-
-	if (hret == H_SUCCESS)
-		return 0; /* Everything is fine */
-	else {
-		ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
-				 "h_ret=%lli e_mr=%p top=%x lkey=%x "
-				 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
-				 e_mr->ib.ib_mr.lkey,
-				 shca->ipz_hca_handle.handle,
-				 e_mr->ipz_mr_handle.handle);
-		return ehca2ib_return_code(hret);
-	}
-}
-
-static u64 ehca_map_vaddr(void *caddr)
-{
-	int top, dir, idx;
-	unsigned long abs_addr, offset;
-	u64 entry;
-
-	if (!ehca_bmap)
-		return EHCA_INVAL_ADDR;
-
-	abs_addr = __pa(caddr);
-	top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
-	if (!ehca_bmap_valid(ehca_bmap->top[top]))
-		return EHCA_INVAL_ADDR;
-
-	dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
-	if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-		return EHCA_INVAL_ADDR;
-
-	idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
-
-	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
-	if (ehca_bmap_valid(entry)) {
-		offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
-		return entry | offset;
-	} else
-		return EHCA_INVAL_ADDR;
-}
-
-static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-	return dma_addr == EHCA_INVAL_ADDR;
-}
-
-static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
-			       size_t size, enum dma_data_direction direction)
-{
-	if (cpu_addr)
-		return ehca_map_vaddr(cpu_addr);
-	else
-		return EHCA_INVAL_ADDR;
-}
-
-static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
-				  enum dma_data_direction direction)
-{
-	/* This is only a stub; nothing to be done here */
-}
-
-static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
-			     unsigned long offset, size_t size,
-			     enum dma_data_direction direction)
-{
-	u64 addr;
-
-	if (offset + size > PAGE_SIZE)
-		return EHCA_INVAL_ADDR;
-
-	addr = ehca_map_vaddr(page_address(page));
-	if (!ehca_dma_mapping_error(dev, addr))
-		addr += offset;
-
-	return addr;
-}
-
-static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
-				enum dma_data_direction direction)
-{
-	/* This is only a stub; nothing to be done here */
-}
-
-static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
-			   int nents, enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i) {
-		u64 addr;
-		addr = ehca_map_vaddr(sg_virt(sg));
-		if (ehca_dma_mapping_error(dev, addr))
-			return 0;
-
-		sg->dma_address = addr;
-		sg->dma_length = sg->length;
-	}
-	return nents;
-}
-
-static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
-			      int nents, enum dma_data_direction direction)
-{
-	/* This is only a stub; nothing to be done here */
-}
-
-static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
-					 size_t size,
-					 enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
-}
-
-static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
-					    size_t size,
-					    enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
-				     u64 *dma_handle, gfp_t flag)
-{
-	struct page *p;
-	void *addr = NULL;
-	u64 dma_addr;
-
-	p = alloc_pages(flag, get_order(size));
-	if (p) {
-		addr = page_address(p);
-		dma_addr = ehca_map_vaddr(addr);
-		if (ehca_dma_mapping_error(dev, dma_addr)) {
-			free_pages((unsigned long)addr,	get_order(size));
-			return NULL;
-		}
-		if (dma_handle)
-			*dma_handle = dma_addr;
-		return addr;
-	}
-	return NULL;
-}
-
-static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
-				   void *cpu_addr, u64 dma_handle)
-{
-	if (cpu_addr && size)
-		free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-
-struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
-	.mapping_error          = ehca_dma_mapping_error,
-	.map_single             = ehca_dma_map_single,
-	.unmap_single           = ehca_dma_unmap_single,
-	.map_page               = ehca_dma_map_page,
-	.unmap_page             = ehca_dma_unmap_page,
-	.map_sg                 = ehca_dma_map_sg,
-	.unmap_sg               = ehca_dma_unmap_sg,
-	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
-	.sync_single_for_device = ehca_dma_sync_single_for_device,
-	.alloc_coherent         = ehca_dma_alloc_coherent,
-	.free_coherent          = ehca_dma_free_coherent,
-};
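
For illustration, a minimal user-space sketch of the three-level top/dir/idx
decomposition that ehca_map_vaddr() and ehca_calc_index() perform above. The
shift and mask values (EX_*) are assumptions chosen for the example only; the
driver's real constants live in its headers and may differ.

#include <stdio.h>

/* example values only -- not the driver's real constants */
#define EX_SECTSHIFT        24ULL		/* 16 MB sections (assumed) */
#define EX_INDEX_BITS       8ULL
#define EX_INDEX_MASK       ((1ULL << EX_INDEX_BITS) - 1)
#define EX_DIR_INDEX_SHIFT  EX_INDEX_BITS
#define EX_TOP_INDEX_SHIFT  (2 * EX_INDEX_BITS)

/* same pattern as ehca_calc_index(): shift to the level, mask one index */
static unsigned long long ex_calc_index(unsigned long long addr,
					unsigned long long shift)
{
	return (addr >> shift) & EX_INDEX_MASK;
}

int main(void)
{
	unsigned long long abs_addr = 0x123456789aULL;
	unsigned long long top = ex_calc_index(abs_addr, EX_TOP_INDEX_SHIFT + EX_SECTSHIFT);
	unsigned long long dir = ex_calc_index(abs_addr, EX_DIR_INDEX_SHIFT + EX_SECTSHIFT);
	unsigned long long idx = ex_calc_index(abs_addr, EX_SECTSHIFT);
	unsigned long long off = abs_addr & ((1ULL << EX_SECTSHIFT) - 1);

	/* one bus-map entry per section; the section offset is re-added later */
	printf("top=%llx dir=%llx idx=%llx offset=0x%llx\n", top, dir, idx, off);
	return 0;
}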
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
deleted file mode 100644
index 52bfa95..0000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  MR/MW declarations and inline functions
- *
- *  Authors: Dietmar Decker <ddecker@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _EHCA_MRMW_H_
-#define _EHCA_MRMW_H_
-
-enum ehca_reg_type {
-	EHCA_REG_MR,
-	EHCA_REG_BUSMAP_MR
-};
-
-int ehca_reg_mr(struct ehca_shca *shca,
-		struct ehca_mr *e_mr,
-		u64 *iova_start,
-		u64 size,
-		int acl,
-		struct ehca_pd *e_pd,
-		struct ehca_mr_pginfo *pginfo,
-		u32 *lkey,
-		u32 *rkey,
-		enum ehca_reg_type reg_type);
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
-		       struct ehca_mr *e_mr,
-		       struct ehca_mr_pginfo *pginfo);
-
-int ehca_rereg_mr(struct ehca_shca *shca,
-		  struct ehca_mr *e_mr,
-		  u64 *iova_start,
-		  u64 size,
-		  int mr_access_flags,
-		  struct ehca_pd *e_pd,
-		  struct ehca_mr_pginfo *pginfo,
-		  u32 *lkey,
-		  u32 *rkey);
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
-		       struct ehca_mr *e_fmr);
-
-int ehca_reg_smr(struct ehca_shca *shca,
-		 struct ehca_mr *e_origmr,
-		 struct ehca_mr *e_newmr,
-		 u64 *iova_start,
-		 int acl,
-		 struct ehca_pd *e_pd,
-		 u32 *lkey,
-		 u32 *rkey);
-
-int ehca_reg_internal_maxmr(struct ehca_shca *shca,
-			    struct ehca_pd *e_pd,
-			    struct ehca_mr **maxmr);
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
-		   struct ehca_mr *e_newmr,
-		   u64 *iova_start,
-		   int acl,
-		   struct ehca_pd *e_pd,
-		   u32 *lkey,
-		   u32 *rkey);
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
-
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
-			     u64 *page_list,
-			     int list_len);
-
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
-		     u32 number,
-		     u64 *kpage);
-
-int ehca_mr_is_maxmr(u64 size,
-		     u64 *iova_start);
-
-void ehca_mrmw_map_acl(int ib_acl,
-		       u32 *hipz_acl);
-
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
-
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
-			       int *ib_acl);
-
-void ehca_mr_deletenew(struct ehca_mr *mr);
-
-int ehca_create_busmap(void);
-
-void ehca_destroy_busmap(void);
-
-extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
-#endif  /*_EHCA_MRMW_H_*/
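
A short sketch of the page-count arithmetic behind num_kpages/num_hwpages in
ehca_reg_internal_maxmr() above. It assumes NUM_CHUNKS is a round-up division
and uses its own helper instead of the driver macro.

#include <stdio.h>

/* assumed round-up division; the driver's NUM_CHUNKS macro plays this role */
static unsigned long chunks(unsigned long len, unsigned long chunk)
{
	return (len + chunk - 1) / chunk;
}

int main(void)
{
	unsigned long page  = 4096;
	unsigned long start = 0x1000f00;	/* not page aligned */
	unsigned long size  = 8192;		/* two pages of payload */

	/*
	 * The buffer spans three pages because it begins 0xf00 bytes into a
	 * page; adding (start % page) to the length before rounding up
	 * accounts for that partial first page, just as the num_kpages and
	 * num_hwpages computations do.
	 */
	printf("pages=%lu\n", chunks((start % page) + size, page));
	return 0;
}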
diff --git a/drivers/staging/rdma/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
deleted file mode 100644
index 2a8aae4..0000000
--- a/drivers/staging/rdma/ehca/ehca_pd.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  PD functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-
-static struct kmem_cache *pd_cache;
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
-			    struct ib_ucontext *context, struct ib_udata *udata)
-{
-	struct ehca_pd *pd;
-	int i;
-
-	pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
-	if (!pd) {
-		ehca_err(device, "device=%p context=%p out of memory",
-			 device, context);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	for (i = 0; i < 2; i++) {
-		INIT_LIST_HEAD(&pd->free[i]);
-		INIT_LIST_HEAD(&pd->full[i]);
-	}
-	mutex_init(&pd->lock);
-
-	/*
-	 * Kernel PD: when context == NULL
-	 * User   PD: when context != NULL
-	 */
-	if (!context) {
-		/*
-		 * After init, kernel PDs always reuse
-		 * the one created in ehca_shca_reopen()
-		 */
-		struct ehca_shca *shca = container_of(device, struct ehca_shca,
-						      ib_device);
-		pd->fw_pd.value = shca->pd->fw_pd.value;
-	} else
-		pd->fw_pd.value = (u64)pd;
-
-	return &pd->ib_pd;
-}
-
-int ehca_dealloc_pd(struct ib_pd *pd)
-{
-	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
-	int i, leftovers = 0;
-	struct ipz_small_queue_page *page, *tmp;
-
-	for (i = 0; i < 2; i++) {
-		list_splice(&my_pd->full[i], &my_pd->free[i]);
-		list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
-			leftovers = 1;
-			free_page(page->page);
-			kmem_cache_free(small_qp_cache, page);
-		}
-	}
-
-	if (leftovers)
-		ehca_warn(pd->device,
-			  "Some small queue pages were not freed");
-
-	kmem_cache_free(pd_cache, my_pd);
-
-	return 0;
-}
-
-int ehca_init_pd_cache(void)
-{
-	pd_cache = kmem_cache_create("ehca_cache_pd",
-				     sizeof(struct ehca_pd), 0,
-				     SLAB_HWCACHE_ALIGN,
-				     NULL);
-	if (!pd_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void ehca_cleanup_pd_cache(void)
-{
-	kmem_cache_destroy(pd_cache);
-}
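
A reduced, self-contained sketch of the slab-cache lifecycle that
ehca_init_pd_cache(), ehca_alloc_pd()/ehca_dealloc_pd() and
ehca_cleanup_pd_cache() follow above. The demo_* names are made up for the
example; the kmem_cache_* calls are the same ones the driver uses.

#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	struct demo_obj *obj;

	/* create the cache once, like ehca_init_pd_cache() */
	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/* allocate a zeroed object, like ehca_alloc_pd() */
	obj = kmem_cache_zalloc(demo_cache, GFP_KERNEL);
	if (!obj) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	/* release the object again, like ehca_dealloc_pd() */
	kmem_cache_free(demo_cache, obj);
	return 0;
}

static void __exit demo_exit(void)
{
	/* tear the cache down, like ehca_cleanup_pd_cache() */
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");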
diff --git a/drivers/staging/rdma/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
deleted file mode 100644
index 90c4efa..0000000
--- a/drivers/staging/rdma/ehca/ehca_qes.h
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Hardware request structures
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _EHCA_QES_H_
-#define _EHCA_QES_H_
-
-#include "ehca_tools.h"
-
-/* virtual scatter gather entry to specify remote addresses with length */
-struct ehca_vsgentry {
-	u64 vaddr;
-	u32 lkey;
-	u32 length;
-};
-
-#define GRH_FLAG_MASK        EHCA_BMASK_IBM( 7,  7)
-#define GRH_IPVERSION_MASK   EHCA_BMASK_IBM( 0,  3)
-#define GRH_TCLASS_MASK      EHCA_BMASK_IBM( 4, 12)
-#define GRH_FLOWLABEL_MASK   EHCA_BMASK_IBM(13, 31)
-#define GRH_PAYLEN_MASK      EHCA_BMASK_IBM(32, 47)
-#define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48, 55)
-#define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56, 63)
-
-/*
- * Unreliable Datagram Address Vector Format
- * see IBTA Vol1 chapter 8.3 Global Routing Header
- */
-struct ehca_ud_av {
-	u8 sl;
-	u8 lnh;
-	u16 dlid;
-	u8 reserved1;
-	u8 reserved2;
-	u8 reserved3;
-	u8 slid_path_bits;
-	u8 reserved4;
-	u8 ipd;
-	u8 reserved5;
-	u8 pmtu;
-	u32 reserved6;
-	u64 reserved7;
-	union {
-		struct {
-			u64 word_0; /* always set to 6  */
-			/* should be 0x1B for IB transport */
-			u64 word_1;
-			u64 word_2;
-			u64 word_3;
-			u64 word_4;
-		} grh;
-		struct {
-			u32 wd_0;
-			u32 wd_1;
-			/* DWord_1 --> SGID */
-
-			u32 sgid_wd3;
-			u32 sgid_wd2;
-
-			u32 sgid_wd1;
-			u32 sgid_wd0;
-			/* DWord_3 --> DGID */
-
-			u32 dgid_wd3;
-			u32 dgid_wd2;
-
-			u32 dgid_wd1;
-			u32 dgid_wd0;
-		} grh_l;
-	};
-};
-
-/* maximum number of sg entries allowed in a WQE */
-#define MAX_WQE_SG_ENTRIES 252
-
-#define WQE_OPTYPE_SEND             0x80
-#define WQE_OPTYPE_RDMAREAD         0x40
-#define WQE_OPTYPE_RDMAWRITE        0x20
-#define WQE_OPTYPE_CMPSWAP          0x10
-#define WQE_OPTYPE_FETCHADD         0x08
-#define WQE_OPTYPE_BIND             0x04
-
-#define WQE_WRFLAG_REQ_SIGNAL_COM   0x80
-#define WQE_WRFLAG_FENCE            0x40
-#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
-#define WQE_WRFLAG_SOLIC_EVENT      0x10
-
-#define WQEF_CACHE_HINT             0x80
-#define WQEF_CACHE_HINT_RD_WR       0x40
-#define WQEF_TIMED_WQE              0x20
-#define WQEF_PURGE                  0x08
-#define WQEF_HIGH_NIBBLE            0xF0
-
-#define MW_BIND_ACCESSCTRL_R_WRITE   0x40
-#define MW_BIND_ACCESSCTRL_R_READ    0x20
-#define MW_BIND_ACCESSCTRL_R_ATOMIC  0x10
-
-struct ehca_wqe {
-	u64 work_request_id;
-	u8 optype;
-	u8 wr_flag;
-	u16 pkeyi;
-	u8 wqef;
-	u8 nr_of_data_seg;
-	u16 wqe_provided_slid;
-	u32 destination_qp_number;
-	u32 resync_psn_sqp;
-	u32 local_ee_context_qkey;
-	u32 immediate_data;
-	union {
-		struct {
-			u64 remote_virtual_address;
-			u32 rkey;
-			u32 reserved;
-			u64 atomic_1st_op_dma_len;
-			u64 atomic_2nd_op;
-			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-
-		} nud;
-		struct {
-			u64 ehca_ud_av_ptr;
-			u64 reserved1;
-			u64 reserved2;
-			u64 reserved3;
-			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-		} ud_avp;
-		struct {
-			struct ehca_ud_av ud_av;
-			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
-						     2];
-		} ud_av;
-		struct {
-			u64 reserved0;
-			u64 reserved1;
-			u64 reserved2;
-			u64 reserved3;
-			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-		} all_rcv;
-
-		struct {
-			u64 reserved;
-			u32 rkey;
-			u32 old_rkey;
-			u64 reserved1;
-			u64 reserved2;
-			u64 virtual_address;
-			u32 reserved3;
-			u32 length;
-			u32 reserved4;
-			u16 reserved5;
-			u8 reserved6;
-			u8 lr_ctl;
-			u32 lkey;
-			u32 reserved7;
-			u64 reserved8;
-			u64 reserved9;
-			u64 reserved10;
-			u64 reserved11;
-		} bind;
-		struct {
-			u64 reserved12;
-			u64 reserved13;
-			u32 size;
-			u32 start;
-		} inline_data;
-	} u;
-
-};
-
-#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
-#define WC_IMM_DATA     EHCA_BMASK_IBM(1, 1)
-#define WC_GRH_PRESENT  EHCA_BMASK_IBM(2, 2)
-#define WC_SE_BIT       EHCA_BMASK_IBM(3, 3)
-#define WC_STATUS_ERROR_BIT 0x80000000
-#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
-#define WC_STATUS_PURGE_BIT 0x10
-#define WC_SEND_RECEIVE_BIT 0x80
-
-struct ehca_cqe {
-	u64 work_request_id;
-	u8 optype;
-	u8 w_completion_flags;
-	u16 reserved1;
-	u32 nr_bytes_transferred;
-	u32 immediate_data;
-	u32 local_qp_number;
-	u8 freed_resource_count;
-	u8 service_level;
-	u16 wqe_count;
-	u32 qp_token;
-	u32 qkey_ee_token;
-	u32 remote_qp_number;
-	u16 dlid;
-	u16 rlid;
-	u16 reserved2;
-	u16 pkey_index;
-	u32 cqe_timestamp;
-	u32 wqe_timestamp;
-	u8 wqe_timestamp_valid;
-	u8 reserved3;
-	u8 reserved4;
-	u8 cqe_flags;
-	u32 status;
-};
-
-struct ehca_eqe {
-	u64 entry;
-};
-
-struct ehca_mrte {
-	u64 starting_va;
-	u64 length; /* length of memory region in bytes*/
-	u32 pd;
-	u8 key_instance;
-	u8 pagesize;
-	u8 mr_control;
-	u8 local_remote_access_ctrl;
-	u8 reserved[0x20 - 0x18];
-	u64 at_pointer[4];
-};
-#endif /*_EHCA_QES_H_*/
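
A small user-space sketch of how a variable WQE size can be derived from the
number of scatter-gather entries, the idea ehca_calc_wqe_size() below applies
to struct ehca_wqe. The structs here are simplified stand-ins, not the real
hardware layout.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* simplified stand-ins for struct ehca_vsgentry / struct ehca_wqe */
struct demo_vsge {
	uint64_t vaddr;
	uint32_t lkey;
	uint32_t length;
};

struct demo_wqe {
	uint64_t work_request_id;
	uint8_t  optype;
	uint8_t  nr_of_data_seg;
	/* the real WQE carries more fixed header fields here */
	struct demo_vsge sg_list[4];
};

int main(void)
{
	int nr_sge;

	/* the WQE size grows with the number of SG entries actually used */
	for (nr_sge = 0; nr_sge <= 4; nr_sge++)
		printf("%d SGEs -> %zu bytes\n", nr_sge,
		       offsetof(struct demo_wqe, sg_list) +
		       nr_sge * sizeof(struct demo_vsge));
	return 0;
}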
diff --git a/drivers/staging/rdma/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
deleted file mode 100644
index 896c01f..0000000
--- a/drivers/staging/rdma/ehca/ehca_qp.c
+++ /dev/null
@@ -1,2256 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  QP functions
- *
- *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
- *           Stefan Roscher <stefan.roscher@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-static struct kmem_cache *qp_cache;
-
-/*
- * attributes not supported by query qp
- */
-#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS       | \
-				     IB_QP_EN_SQD_ASYNC_NOTIFY)
-
-/*
- * ehca (internal) qp state values
- */
-enum ehca_qp_state {
-	EHCA_QPS_RESET = 1,
-	EHCA_QPS_INIT = 2,
-	EHCA_QPS_RTR = 3,
-	EHCA_QPS_RTS = 5,
-	EHCA_QPS_SQD = 6,
-	EHCA_QPS_SQE = 8,
-	EHCA_QPS_ERR = 128
-};
-
-/*
- * qp state transitions as defined by IB Arch Rel 1.1 page 431
- */
-enum ib_qp_statetrans {
-	IB_QPST_ANY2RESET,
-	IB_QPST_ANY2ERR,
-	IB_QPST_RESET2INIT,
-	IB_QPST_INIT2RTR,
-	IB_QPST_INIT2INIT,
-	IB_QPST_RTR2RTS,
-	IB_QPST_RTS2SQD,
-	IB_QPST_RTS2RTS,
-	IB_QPST_SQD2RTS,
-	IB_QPST_SQE2RTS,
-	IB_QPST_SQD2SQD,
-	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
-};
-
-/*
- * ib2ehca_qp_state maps IB to ehca qp_state
- * returns ehca qp state corresponding to given ib qp state
- */
-static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
-{
-	switch (ib_qp_state) {
-	case IB_QPS_RESET:
-		return EHCA_QPS_RESET;
-	case IB_QPS_INIT:
-		return EHCA_QPS_INIT;
-	case IB_QPS_RTR:
-		return EHCA_QPS_RTR;
-	case IB_QPS_RTS:
-		return EHCA_QPS_RTS;
-	case IB_QPS_SQD:
-		return EHCA_QPS_SQD;
-	case IB_QPS_SQE:
-		return EHCA_QPS_SQE;
-	case IB_QPS_ERR:
-		return EHCA_QPS_ERR;
-	default:
-		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
-		return -EINVAL;
-	}
-}
-
-/*
- * ehca2ib_qp_state maps ehca to IB qp_state
- * returns ib qp state corresponding to given ehca qp state
- */
-static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
-						ehca_qp_state)
-{
-	switch (ehca_qp_state) {
-	case EHCA_QPS_RESET:
-		return IB_QPS_RESET;
-	case EHCA_QPS_INIT:
-		return IB_QPS_INIT;
-	case EHCA_QPS_RTR:
-		return IB_QPS_RTR;
-	case EHCA_QPS_RTS:
-		return IB_QPS_RTS;
-	case EHCA_QPS_SQD:
-		return IB_QPS_SQD;
-	case EHCA_QPS_SQE:
-		return IB_QPS_SQE;
-	case EHCA_QPS_ERR:
-		return IB_QPS_ERR;
-	default:
-		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
-		return -EINVAL;
-	}
-}
-
-/*
- * ehca_qp_type used as index for req_attr and opt_attr of
- * struct ehca_modqp_statetrans
- */
-enum ehca_qp_type {
-	QPT_RC = 0,
-	QPT_UC = 1,
-	QPT_UD = 2,
-	QPT_SQP = 3,
-	QPT_MAX
-};
-
-/*
- * ib2ehcaqptype maps IB to ehca qp_type
- * returns ehca qp type corresponding to ib qp type
- */
-static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
-{
-	switch (ibqptype) {
-	case IB_QPT_SMI:
-	case IB_QPT_GSI:
-		return QPT_SQP;
-	case IB_QPT_RC:
-		return QPT_RC;
-	case IB_QPT_UC:
-		return QPT_UC;
-	case IB_QPT_UD:
-		return QPT_UD;
-	default:
-		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
-		return -EINVAL;
-	}
-}
-
-static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
-							 int ib_tostate)
-{
-	int index = -EINVAL;
-	switch (ib_tostate) {
-	case IB_QPS_RESET:
-		index = IB_QPST_ANY2RESET;
-		break;
-	case IB_QPS_INIT:
-		switch (ib_fromstate) {
-		case IB_QPS_RESET:
-			index = IB_QPST_RESET2INIT;
-			break;
-		case IB_QPS_INIT:
-			index = IB_QPST_INIT2INIT;
-			break;
-		}
-		break;
-	case IB_QPS_RTR:
-		if (ib_fromstate == IB_QPS_INIT)
-			index = IB_QPST_INIT2RTR;
-		break;
-	case IB_QPS_RTS:
-		switch (ib_fromstate) {
-		case IB_QPS_RTR:
-			index = IB_QPST_RTR2RTS;
-			break;
-		case IB_QPS_RTS:
-			index = IB_QPST_RTS2RTS;
-			break;
-		case IB_QPS_SQD:
-			index = IB_QPST_SQD2RTS;
-			break;
-		case IB_QPS_SQE:
-			index = IB_QPST_SQE2RTS;
-			break;
-		}
-		break;
-	case IB_QPS_SQD:
-		if (ib_fromstate == IB_QPS_RTS)
-			index = IB_QPST_RTS2SQD;
-		break;
-	case IB_QPS_SQE:
-		break;
-	case IB_QPS_ERR:
-		index = IB_QPST_ANY2ERR;
-		break;
-	default:
-		break;
-	}
-	return index;
-}
-
-/*
- * ibqptype2servicetype returns hcp service type corresponding to given
- * ib qp type used by create_qp()
- */
-static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
-{
-	switch (ibqptype) {
-	case IB_QPT_SMI:
-	case IB_QPT_GSI:
-		return ST_UD;
-	case IB_QPT_RC:
-		return ST_RC;
-	case IB_QPT_UC:
-		return ST_UC;
-	case IB_QPT_UD:
-		return ST_UD;
-	case IB_QPT_RAW_IPV6:
-		return -EINVAL;
-	case IB_QPT_RAW_ETHERTYPE:
-		return -EINVAL;
-	default:
-		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
-		return -EINVAL;
-	}
-}
-
-/*
- * init userspace queue info from ipz_queue data
- */
-static inline void queue2resp(struct ipzu_queue_resp *resp,
-			      struct ipz_queue *queue)
-{
-	resp->qe_size = queue->qe_size;
-	resp->act_nr_of_sg = queue->act_nr_of_sg;
-	resp->queue_length = queue->queue_length;
-	resp->pagesize = queue->pagesize;
-	resp->toggle_state = queue->toggle_state;
-	resp->offset = queue->offset;
-}
-
-/*
- * init_qp_queue initializes/constructs r/squeue and registers queue pages.
- */
-static inline int init_qp_queue(struct ehca_shca *shca,
-				struct ehca_pd *pd,
-				struct ehca_qp *my_qp,
-				struct ipz_queue *queue,
-				int q_type,
-				u64 expected_hret,
-				struct ehca_alloc_queue_parms *parms,
-				int wqe_size)
-{
-	int ret, cnt, ipz_rc, nr_q_pages;
-	void *vpage;
-	u64 rpage, h_ret;
-	struct ib_device *ib_dev = &shca->ib_device;
-	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
-
-	if (!parms->queue_size)
-		return 0;
-
-	if (parms->is_small) {
-		nr_q_pages = 1;
-		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
-					128 << parms->page_size,
-					wqe_size, parms->act_nr_sges, 1);
-	} else {
-		nr_q_pages = parms->queue_size;
-		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
-					EHCA_PAGESIZE, wqe_size,
-					parms->act_nr_sges, 0);
-	}
-
-	if (!ipz_rc) {
-		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
-			 ipz_rc);
-		return -EBUSY;
-	}
-
-	/* register queue pages */
-	for (cnt = 0; cnt < nr_q_pages; cnt++) {
-		vpage = ipz_qpageit_get_inc(queue);
-		if (!vpage) {
-			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
-				 "failed p_vpage= %p", vpage);
-			ret = -EINVAL;
-			goto init_qp_queue1;
-		}
-		rpage = __pa(vpage);
-
-		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
-						 my_qp->ipz_qp_handle,
-						 NULL, 0, q_type,
-						 rpage, parms->is_small ? 0 : 1,
-						 my_qp->galpas.kernel);
-		if (cnt == (nr_q_pages - 1)) {	/* last page! */
-			if (h_ret != expected_hret) {
-				ehca_err(ib_dev, "hipz_qp_register_rpage() "
-					 "h_ret=%lli", h_ret);
-				ret = ehca2ib_return_code(h_ret);
-				goto init_qp_queue1;
-			}
-			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
-			if (vpage) {
-				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
-					 "should not succeed vpage=%p", vpage);
-				ret = -EINVAL;
-				goto init_qp_queue1;
-			}
-		} else {
-			if (h_ret != H_PAGE_REGISTERED) {
-				ehca_err(ib_dev, "hipz_qp_register_rpage() "
-					 "h_ret=%lli", h_ret);
-				ret = ehca2ib_return_code(h_ret);
-				goto init_qp_queue1;
-			}
-		}
-	}
-
-	ipz_qeit_reset(queue);
-
-	return 0;
-
-init_qp_queue1:
-	ipz_queue_dtor(pd, queue);
-	return ret;
-}
-
-static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
-{
-	if (is_llqp)
-		return 128 << act_nr_sge;
-	else
-		return offsetof(struct ehca_wqe,
-				u.nud.sg_list[act_nr_sge]);
-}
-
-static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
-				       int req_nr_sge, int is_llqp)
-{
-	u32 wqe_size, q_size;
-	int act_nr_sge = req_nr_sge;
-
-	if (!is_llqp)
-		/* round up #SGEs so WQE size is a power of 2 */
-		for (act_nr_sge = 4; act_nr_sge <= 252;
-		     act_nr_sge = 4 + 2 * act_nr_sge)
-			if (act_nr_sge >= req_nr_sge)
-				break;
-
-	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
-	q_size = wqe_size * (queue->max_wr + 1);
-
-	if (q_size <= 512)
-		queue->page_size = 2;
-	else if (q_size <= 1024)
-		queue->page_size = 3;
-	else
-		queue->page_size = 0;
-
-	queue->is_small = (queue->page_size != 0);
-}
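
Editorial sketch (not part of the driver): for non-LL QPs the rounding loop
above only ever yields the SGE counts 4, 12, 28, 60, 124 and 252, and the
queue is classified as "small" when the resulting queue size fits into 512 or
1024 bytes. The hypothetical helper below merely reproduces that rounding:

	static int ehca_round_up_sge_sketch(int req_nr_sge)
	{
		int act_nr_sge;

		/* same progression as above: 4, 12, 28, 60, 124, 252 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;
		return act_nr_sge;	/* e.g. a request for 30 SGEs yields 60 */
	}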
-
-/* needs to be called with cq->spinlock held */
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
-{
-	struct list_head *list, *node;
-
-	/* TODO: support low latency QPs */
-	if (qp->ext_type == EQPT_LLQP)
-		return;
-
-	if (on_sq) {
-		list = &qp->send_cq->sqp_err_list;
-		node = &qp->sq_err_node;
-	} else {
-		list = &qp->recv_cq->rqp_err_list;
-		node = &qp->rq_err_node;
-	}
-
-	if (list_empty(node))
-		list_add_tail(node, list);
-
-	return;
-}
-
-static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->spinlock, flags);
-
-	if (!list_empty(node))
-		list_del_init(node);
-
-	spin_unlock_irqrestore(&cq->spinlock, flags);
-}
-
-static void reset_queue_map(struct ehca_queue_map *qmap)
-{
-	int i;
-
-	qmap->tail = qmap->entries - 1;
-	qmap->left_to_poll = 0;
-	qmap->next_wqe_idx = 0;
-	for (i = 0; i < qmap->entries; i++) {
-		qmap->map[i].reported = 1;
-		qmap->map[i].cqe_req = 0;
-	}
-}
-
-/*
- * Create an ib_qp struct that is either a QP or an SRQ, depending on
- * the value of the is_srq parameter. If init_attr and srq_init_attr share
- * fields, the value from init_attr is used.
- */
-static struct ehca_qp *internal_create_qp(
-	struct ib_pd *pd,
-	struct ib_qp_init_attr *init_attr,
-	struct ib_srq_init_attr *srq_init_attr,
-	struct ib_udata *udata, int is_srq)
-{
-	struct ehca_qp *my_qp, *my_srq = NULL;
-	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
-	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-					      ib_device);
-	struct ib_ucontext *context = NULL;
-	u64 h_ret;
-	int is_llqp = 0, has_srq = 0, is_user = 0;
-	int qp_type, max_send_sge, max_recv_sge, ret;
-
-	/* h_call's out parameters */
-	struct ehca_alloc_qp_parms parms;
-	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
-	unsigned long flags;
-
-	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
-		ehca_err(pd->device, "Unable to create QP, max number of %i "
-			 "QPs reached.", shca->max_num_qps);
-		ehca_err(pd->device, "To increase the maximum number of QPs "
-			 "use the number_of_qps module parameter.\n");
-		return ERR_PTR(-ENOSPC);
-	}
-
-	if (init_attr->create_flags) {
-		atomic_dec(&shca->num_qps);
-		return ERR_PTR(-EINVAL);
-	}
-
-	memset(&parms, 0, sizeof(parms));
-	qp_type = init_attr->qp_type;
-
-	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
-		init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
-		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
-			 init_attr->sq_sig_type);
-		atomic_dec(&shca->num_qps);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* save LLQP info */
-	if (qp_type & 0x80) {
-		is_llqp = 1;
-		parms.ext_type = EQPT_LLQP;
-		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
-	}
-	qp_type &= 0x1F;
-	init_attr->qp_type &= 0x1F;
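	/*
	 * Editorial note (assumption, not from the source): callers presumably
	 * request a low-latency QP by OR-ing 0x80 into the standard type,
	 * e.g. qp_type = IB_QPT_RC | 0x80; the flag and the LLQP_COMP_MASK
	 * bits are stripped above so the rest of the code only sees the
	 * plain IB_QPT_* value.
	 */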
-
-	/* handle SRQ base QPs */
-	if (init_attr->srq) {
-		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
-
-		if (qp_type == IB_QPT_UC) {
-			ehca_err(pd->device, "UC with SRQ not supported");
-			atomic_dec(&shca->num_qps);
-			return ERR_PTR(-EINVAL);
-		}
-
-		has_srq = 1;
-		parms.ext_type = EQPT_SRQBASE;
-		parms.srq_qpn = my_srq->real_qp_num;
-	}
-
-	if (is_llqp && has_srq) {
-		ehca_err(pd->device, "LLQPs can't have an SRQ");
-		atomic_dec(&shca->num_qps);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* handle SRQs */
-	if (is_srq) {
-		parms.ext_type = EQPT_SRQ;
-		parms.srq_limit = srq_init_attr->attr.srq_limit;
-		if (init_attr->cap.max_recv_sge > 3) {
-			ehca_err(pd->device, "no more than three SGEs "
-				 "supported for SRQ  pd=%p  max_sge=%x",
-				 pd, init_attr->cap.max_recv_sge);
-			atomic_dec(&shca->num_qps);
-			return ERR_PTR(-EINVAL);
-		}
-	}
-
-	/* check QP type */
-	if (qp_type != IB_QPT_UD &&
-	    qp_type != IB_QPT_UC &&
-	    qp_type != IB_QPT_RC &&
-	    qp_type != IB_QPT_SMI &&
-	    qp_type != IB_QPT_GSI) {
-		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
-		atomic_dec(&shca->num_qps);
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (is_llqp) {
-		switch (qp_type) {
-		case IB_QPT_RC:
-			if ((init_attr->cap.max_send_wr > 255) ||
-			    (init_attr->cap.max_recv_wr > 255)) {
-				ehca_err(pd->device,
-					 "Invalid Number of max_sq_wr=%x "
-					 "or max_rq_wr=%x for RC LLQP",
-					 init_attr->cap.max_send_wr,
-					 init_attr->cap.max_recv_wr);
-				atomic_dec(&shca->num_qps);
-				return ERR_PTR(-EINVAL);
-			}
-			break;
-		case IB_QPT_UD:
-			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
-				ehca_err(pd->device, "UD LLQP not supported "
-					 "by this adapter");
-				atomic_dec(&shca->num_qps);
-				return ERR_PTR(-ENOSYS);
-			}
-			if (!(init_attr->cap.max_send_sge <= 5
-			    && init_attr->cap.max_send_sge >= 1
-			    && init_attr->cap.max_recv_sge <= 5
-			    && init_attr->cap.max_recv_sge >= 1)) {
-				ehca_err(pd->device,
-					 "Invalid Number of max_send_sge=%x "
-					 "or max_recv_sge=%x for UD LLQP",
-					 init_attr->cap.max_send_sge,
-					 init_attr->cap.max_recv_sge);
-				atomic_dec(&shca->num_qps);
-				return ERR_PTR(-EINVAL);
-			} else if (init_attr->cap.max_send_wr > 255) {
-				ehca_err(pd->device,
-					 "Invalid Number of "
-					 "max_send_wr=%x for UD QP_TYPE=%x",
-					 init_attr->cap.max_send_wr, qp_type);
-				atomic_dec(&shca->num_qps);
-				return ERR_PTR(-EINVAL);
-			}
-			break;
-		default:
-			ehca_err(pd->device, "unsupported LL QP Type=%x",
-				 qp_type);
-			atomic_dec(&shca->num_qps);
-			return ERR_PTR(-EINVAL);
-		}
-	} else {
-		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
-			       || qp_type == IB_QPT_GSI) ? 250 : 252;
-
-		if (init_attr->cap.max_send_sge > max_sge
-		    || init_attr->cap.max_recv_sge > max_sge) {
-			ehca_err(pd->device, "Invalid number of SGEs requested "
-				 "send_sge=%x recv_sge=%x max_sge=%x",
-				 init_attr->cap.max_send_sge,
-				 init_attr->cap.max_recv_sge, max_sge);
-			atomic_dec(&shca->num_qps);
-			return ERR_PTR(-EINVAL);
-		}
-	}
-
-	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
-	if (!my_qp) {
-		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
-		atomic_dec(&shca->num_qps);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (pd->uobject && udata) {
-		is_user = 1;
-		context = pd->uobject->context;
-	}
-
-	atomic_set(&my_qp->nr_events, 0);
-	init_waitqueue_head(&my_qp->wait_completion);
-	spin_lock_init(&my_qp->spinlock_s);
-	spin_lock_init(&my_qp->spinlock_r);
-	my_qp->qp_type = qp_type;
-	my_qp->ext_type = parms.ext_type;
-	my_qp->state = IB_QPS_RESET;
-
-	if (init_attr->recv_cq)
-		my_qp->recv_cq =
-			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
-	if (init_attr->send_cq)
-		my_qp->send_cq =
-			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
-
-	idr_preload(GFP_KERNEL);
-	write_lock_irqsave(&ehca_qp_idr_lock, flags);
-
-	ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
-	if (ret >= 0)
-		my_qp->token = ret;
-
-	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-	idr_preload_end();
-	if (ret < 0) {
-		if (ret == -ENOSPC) {
-			ret = -EINVAL;
-			ehca_err(pd->device, "Invalid number of QPs");
-		} else {
-			ret = -ENOMEM;
-			ehca_err(pd->device, "Can't allocate new idr entry.");
-		}
-		goto create_qp_exit0;
-	}
-
-	if (has_srq)
-		parms.srq_token = my_qp->token;
-
-	parms.servicetype = ibqptype2servicetype(qp_type);
-	if (parms.servicetype < 0) {
-		ret = -EINVAL;
-		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
-		goto create_qp_exit1;
-	}
-
-	/* Always signal by WQE so we can hide circ. WQEs */
-	parms.sigtype = HCALL_SIGT_BY_WQE;
-
-	/* UD_AV CIRCUMVENTION */
-	max_send_sge = init_attr->cap.max_send_sge;
-	max_recv_sge = init_attr->cap.max_recv_sge;
-	if (parms.servicetype == ST_UD && !is_llqp) {
-		max_send_sge += 2;
-		max_recv_sge += 2;
-	}
-
-	parms.token = my_qp->token;
-	parms.eq_handle = shca->eq.ipz_eq_handle;
-	parms.pd = my_pd->fw_pd;
-	if (my_qp->send_cq)
-		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
-	if (my_qp->recv_cq)
-		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
-
-	parms.squeue.max_wr = init_attr->cap.max_send_wr;
-	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
-	parms.squeue.max_sge = max_send_sge;
-	parms.rqueue.max_sge = max_recv_sge;
-
-	/* RC QPs need one more SWQE for unsolicited ack circumvention */
-	if (qp_type == IB_QPT_RC)
-		parms.squeue.max_wr++;
-
-	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
-		if (HAS_SQ(my_qp))
-			ehca_determine_small_queue(
-				&parms.squeue, max_send_sge, is_llqp);
-		if (HAS_RQ(my_qp))
-			ehca_determine_small_queue(
-				&parms.rqueue, max_recv_sge, is_llqp);
-		parms.qp_storage =
-			(parms.squeue.is_small || parms.rqueue.is_small);
-	}
-
-	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
-			 h_ret);
-		ret = ehca2ib_return_code(h_ret);
-		goto create_qp_exit1;
-	}
-
-	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
-	my_qp->ipz_qp_handle = parms.qp_handle;
-	my_qp->galpas = parms.galpas;
-
-	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
-	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
-
-	switch (qp_type) {
-	case IB_QPT_RC:
-		if (is_llqp) {
-			parms.squeue.act_nr_sges = 1;
-			parms.rqueue.act_nr_sges = 1;
-		}
-		/* hide the extra WQE */
-		parms.squeue.act_nr_wqes--;
-		break;
-	case IB_QPT_UD:
-	case IB_QPT_GSI:
-	case IB_QPT_SMI:
-		/* UD circumvention */
-		if (is_llqp) {
-			parms.squeue.act_nr_sges = 1;
-			parms.rqueue.act_nr_sges = 1;
-		} else {
-			parms.squeue.act_nr_sges -= 2;
-			parms.rqueue.act_nr_sges -= 2;
-		}
-
-		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
-			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
-			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
-			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
-			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
-			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
-		}
-
-		break;
-
-	default:
-		break;
-	}
-
-	/* initialize r/squeue and register queue pages */
-	if (HAS_SQ(my_qp)) {
-		ret = init_qp_queue(
-			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
-			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
-			&parms.squeue, swqe_size);
-		if (ret) {
-			ehca_err(pd->device, "Couldn't initialize squeue "
-				 "and pages ret=%i", ret);
-			goto create_qp_exit2;
-		}
-
-		if (!is_user) {
-			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-				my_qp->ipz_squeue.qe_size;
-			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-						    sizeof(struct ehca_qmap_entry));
-			if (!my_qp->sq_map.map) {
-				ret = -ENOMEM;
-				ehca_err(pd->device, "Couldn't allocate squeue "
-					 "map ret=%i", ret);
-				goto create_qp_exit3;
-			}
-			INIT_LIST_HEAD(&my_qp->sq_err_node);
-			/* to avoid the generation of bogus flush CQEs */
-			reset_queue_map(&my_qp->sq_map);
-		}
-	}
-
-	if (HAS_RQ(my_qp)) {
-		ret = init_qp_queue(
-			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
-			H_SUCCESS, &parms.rqueue, rwqe_size);
-		if (ret) {
-			ehca_err(pd->device, "Couldn't initialize rqueue "
-				 "and pages ret=%i", ret);
-			goto create_qp_exit4;
-		}
-		if (!is_user) {
-			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-				my_qp->ipz_rqueue.qe_size;
-			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-						    sizeof(struct ehca_qmap_entry));
-			if (!my_qp->rq_map.map) {
-				ret = -ENOMEM;
-				ehca_err(pd->device, "Couldn't allocate rqueue "
-					 "map ret=%i", ret);
-				goto create_qp_exit5;
-			}
-			INIT_LIST_HEAD(&my_qp->rq_err_node);
-			/* to avoid the generation of bogus flush CQEs */
-			reset_queue_map(&my_qp->rq_map);
-		}
-	} else if (init_attr->srq && !is_user) {
-		/* this is a base QP, use the queue map of the SRQ */
-		my_qp->rq_map = my_srq->rq_map;
-		INIT_LIST_HEAD(&my_qp->rq_err_node);
-
-		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
-	}
-
-	if (is_srq) {
-		my_qp->ib_srq.pd = &my_pd->ib_pd;
-		my_qp->ib_srq.device = my_pd->ib_pd.device;
-
-		my_qp->ib_srq.srq_context = init_attr->qp_context;
-		my_qp->ib_srq.event_handler = init_attr->event_handler;
-	} else {
-		my_qp->ib_qp.qp_num = ib_qp_num;
-		my_qp->ib_qp.pd = &my_pd->ib_pd;
-		my_qp->ib_qp.device = my_pd->ib_pd.device;
-
-		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
-		my_qp->ib_qp.send_cq = init_attr->send_cq;
-
-		my_qp->ib_qp.qp_type = qp_type;
-		my_qp->ib_qp.srq = init_attr->srq;
-
-		my_qp->ib_qp.qp_context = init_attr->qp_context;
-		my_qp->ib_qp.event_handler = init_attr->event_handler;
-	}
-
-	init_attr->cap.max_inline_data = 0; /* not supported yet */
-	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
-	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
-	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
-	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
-	my_qp->init_attr = *init_attr;
-
-	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
-			&my_qp->ib_qp;
-		if (ehca_nr_ports < 0) {
-			/* alloc array to cache subsequent modify qp parms
-			 * for autodetect mode
-			 */
-			my_qp->mod_qp_parm =
-				kzalloc(EHCA_MOD_QP_PARM_MAX *
-					sizeof(*my_qp->mod_qp_parm),
-					GFP_KERNEL);
-			if (!my_qp->mod_qp_parm) {
-				ret = -ENOMEM;
-				ehca_err(pd->device,
-					 "Could not alloc mod_qp_parm");
-				goto create_qp_exit5;
-			}
-		}
-	}
-
-	/* NOTE: define_aqp0() not supported yet */
-	if (qp_type == IB_QPT_GSI) {
-		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
-		if (h_ret != H_SUCCESS) {
-			kfree(my_qp->mod_qp_parm);
-			my_qp->mod_qp_parm = NULL;
-			/* the QP pointer is no longer valid */
-			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
-				NULL;
-			ret = ehca2ib_return_code(h_ret);
-			goto create_qp_exit6;
-		}
-	}
-
-	if (my_qp->send_cq) {
-		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
-		if (ret) {
-			ehca_err(pd->device,
-				 "Couldn't assign qp to send_cq ret=%i", ret);
-			goto create_qp_exit7;
-		}
-	}
-
-	/* copy queues, galpa data to user space */
-	if (context && udata) {
-		struct ehca_create_qp_resp resp;
-		memset(&resp, 0, sizeof(resp));
-
-		resp.qp_num = my_qp->real_qp_num;
-		resp.token = my_qp->token;
-		resp.qp_type = my_qp->qp_type;
-		resp.ext_type = my_qp->ext_type;
-		resp.qkey = my_qp->qkey;
-		resp.real_qp_num = my_qp->real_qp_num;
-
-		if (HAS_SQ(my_qp))
-			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
-		if (HAS_RQ(my_qp))
-			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
-		resp.fw_handle_ofs = (u32)
-			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
-
-		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
-			ehca_err(pd->device, "Copy to udata failed");
-			ret = -EINVAL;
-			goto create_qp_exit8;
-		}
-	}
-
-	return my_qp;
-
-create_qp_exit8:
-	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-
-create_qp_exit7:
-	kfree(my_qp->mod_qp_parm);
-
-create_qp_exit6:
-	if (HAS_RQ(my_qp) && !is_user)
-		vfree(my_qp->rq_map.map);
-
-create_qp_exit5:
-	if (HAS_RQ(my_qp))
-		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-create_qp_exit4:
-	if (HAS_SQ(my_qp) && !is_user)
-		vfree(my_qp->sq_map.map);
-
-create_qp_exit3:
-	if (HAS_SQ(my_qp))
-		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-create_qp_exit2:
-	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
-
-create_qp_exit1:
-	write_lock_irqsave(&ehca_qp_idr_lock, flags);
-	idr_remove(&ehca_qp_idr, my_qp->token);
-	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-create_qp_exit0:
-	kmem_cache_free(qp_cache, my_qp);
-	atomic_dec(&shca->num_qps);
-	return ERR_PTR(ret);
-}
-
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
-			     struct ib_qp_init_attr *qp_init_attr,
-			     struct ib_udata *udata)
-{
-	struct ehca_qp *ret;
-
-	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
-	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-			       struct ib_uobject *uobject);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
-			       struct ib_srq_init_attr *srq_init_attr,
-			       struct ib_udata *udata)
-{
-	struct ib_qp_init_attr qp_init_attr;
-	struct ehca_qp *my_qp;
-	struct ib_srq *ret;
-	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-					      ib_device);
-	struct hcp_modify_qp_control_block *mqpcb;
-	u64 hret, update_mask;
-
-	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
-		return ERR_PTR(-ENOSYS);
-
-	/* For common attributes, internal_create_qp() takes its info
-	 * out of qp_init_attr, so copy all common attrs there.
-	 */
-	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
-	qp_init_attr.event_handler = srq_init_attr->event_handler;
-	qp_init_attr.qp_context = srq_init_attr->srq_context;
-	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-	qp_init_attr.qp_type = IB_QPT_RC;
-	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
-	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
-
-	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
-	if (IS_ERR(my_qp))
-		return (struct ib_srq *)my_qp;
-
-	/* copy back return values */
-	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
-	srq_init_attr->attr.max_sge = 3;
-
-	/* drive SRQ into RTR state */
-	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!mqpcb) {
-		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
-			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
-		ret = ERR_PTR(-ENOMEM);
-		goto create_srq1;
-	}
-
-	mqpcb->qp_state = EHCA_QPS_INIT;
-	mqpcb->prim_phys_port = 1;
-	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-				my_qp->ipz_qp_handle,
-				&my_qp->pf,
-				update_mask,
-				mqpcb, my_qp->galpas.kernel);
-	if (hret != H_SUCCESS) {
-		ehca_err(pd->device, "Could not modify SRQ to INIT "
-			 "ehca_qp=%p qp_num=%x h_ret=%lli",
-			 my_qp, my_qp->real_qp_num, hret);
-		goto create_srq2;
-	}
-
-	mqpcb->qp_enable = 1;
-	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
-	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-				my_qp->ipz_qp_handle,
-				&my_qp->pf,
-				update_mask,
-				mqpcb, my_qp->galpas.kernel);
-	if (hret != H_SUCCESS) {
-		ehca_err(pd->device, "Could not enable SRQ "
-			 "ehca_qp=%p qp_num=%x h_ret=%lli",
-			 my_qp, my_qp->real_qp_num, hret);
-		goto create_srq2;
-	}
-
-	mqpcb->qp_state  = EHCA_QPS_RTR;
-	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-				my_qp->ipz_qp_handle,
-				&my_qp->pf,
-				update_mask,
-				mqpcb, my_qp->galpas.kernel);
-	if (hret != H_SUCCESS) {
-		ehca_err(pd->device, "Could not modify SRQ to RTR "
-			 "ehca_qp=%p qp_num=%x h_ret=%lli",
-			 my_qp, my_qp->real_qp_num, hret);
-		goto create_srq2;
-	}
-
-	ehca_free_fw_ctrlblock(mqpcb);
-
-	return &my_qp->ib_srq;
-
-create_srq2:
-	ret = ERR_PTR(ehca2ib_return_code(hret));
-	ehca_free_fw_ctrlblock(mqpcb);
-
-create_srq1:
-	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
-
-	return ret;
-}
-
-/*
- * prepare_sqe_rts() is called by internal_modify_qp() on the SQE -> RTS
- * transition. It sets the purge bit of the bad WQE and of all subsequent
- * WQEs to avoid re-entering SQE, and returns the total number of bad WQEs
- * in bad_wqe_cnt.
- */
-static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
-			   int *bad_wqe_cnt)
-{
-	u64 h_ret;
-	struct ipz_queue *squeue;
-	void *bad_send_wqe_p, *bad_send_wqe_v;
-	u64 q_ofs;
-	struct ehca_wqe *wqe;
-	int qp_num = my_qp->ib_qp.qp_num;
-
-	/* get send wqe pointer */
-	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
-					   my_qp->ipz_qp_handle, &my_qp->pf,
-					   &bad_send_wqe_p, NULL, 2);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
-			 " ehca_qp=%p qp_num=%x h_ret=%lli",
-			 my_qp, qp_num, h_ret);
-		return ehca2ib_return_code(h_ret);
-	}
-	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
-	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
-		 qp_num, bad_send_wqe_p);
-	/* convert wqe pointer to vadr */
-	bad_send_wqe_v = __va((u64)bad_send_wqe_p);
-	if (ehca_debug_level >= 2)
-		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
-	squeue = &my_qp->ipz_squeue;
-	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
-		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
-			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
-		return -EFAULT;
-	}
-
-	/* loop sets wqe's purge bit */
-	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
-	*bad_wqe_cnt = 0;
-	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
-		if (ehca_debug_level >= 2)
-			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
-		wqe->nr_of_data_seg = 0; /* suppress data access */
-		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
-		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
-		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
-		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
-	}
-	/*
-	 * the bad wqe will be reprocessed and ignored when poll_cq() is
-	 * called, i.e. the number of wqes with flush error status is one less
-	 */
-	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
-		 qp_num, (*bad_wqe_cnt)-1);
-	wqe->wqef = 0;
-
-	return 0;
-}
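
Editorial note: the purge loop above stops at the first WQE whose optype and
wqef fields are both 0xff. That sentinel is planted by internal_modify_qp()
further down in this file, just before prepare_sqe_rts() is called:

	/* mark next free wqe */
	wqe = (struct ehca_wqe *)ipz_qeit_get(&my_qp->ipz_squeue);
	wqe->optype = wqe->wqef = 0xff;	/* ends the purge loop above */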
-
-static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
-			  struct ehca_queue_map *qmap)
-{
-	void *wqe_v;
-	u64 q_ofs;
-	u32 wqe_idx;
-	unsigned int tail_idx;
-
-	/* convert real to abs address */
-	wqe_p = wqe_p & (~(1UL << 63));
-
-	wqe_v = __va(wqe_p);
-
-	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
-		ehca_gen_err("Invalid offset for calculating left cqes "
-				"wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
-		return -EFAULT;
-	}
-
-	tail_idx = next_index(qmap->tail, qmap->entries);
-	wqe_idx = q_ofs / ipz_queue->qe_size;
-
-	/* check all processed wqes, whether a cqe is requested or not */
-	while (tail_idx != wqe_idx) {
-		if (qmap->map[tail_idx].cqe_req)
-			qmap->left_to_poll++;
-		tail_idx = next_index(tail_idx, qmap->entries);
-	}
-	/* save index in queue, where we have to start flushing */
-	qmap->next_wqe_idx = wqe_idx;
-	return 0;
-}
-
-static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
-{
-	u64 h_ret;
-	void *send_wqe_p, *recv_wqe_p;
-	int ret;
-	unsigned long flags;
-	int qp_num = my_qp->ib_qp.qp_num;
-
-	/* this hcall is not supported on base QPs */
-	if (my_qp->ext_type != EQPT_SRQBASE) {
-		/* get send and receive wqe pointer */
-		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
-				my_qp->ipz_qp_handle, &my_qp->pf,
-				&send_wqe_p, &recv_wqe_p, 4);
-		if (h_ret != H_SUCCESS) {
-			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
-				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
-				 my_qp, qp_num, h_ret);
-			return ehca2ib_return_code(h_ret);
-		}
-
-		/*
-		 * acquire lock to ensure that nobody is polling the cq which
-		 * could mean that the qmap->tail pointer is in an
-		 * inconsistent state.
-		 */
-		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
-				&my_qp->sq_map);
-		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-		if (ret)
-			return ret;
-
-
-		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
-				&my_qp->rq_map);
-		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
-		if (ret)
-			return ret;
-	} else {
-		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-		my_qp->sq_map.left_to_poll = 0;
-		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
-							my_qp->sq_map.entries);
-		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
-		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-		my_qp->rq_map.left_to_poll = 0;
-		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
-							my_qp->rq_map.entries);
-		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
-	}
-
-	/* this ensures flush cqes are generated only for pending wqes */
-	if ((my_qp->sq_map.left_to_poll == 0) &&
-				(my_qp->rq_map.left_to_poll == 0)) {
-		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-		ehca_add_to_err_list(my_qp, 1);
-		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
-		if (HAS_RQ(my_qp)) {
-			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-			ehca_add_to_err_list(my_qp, 0);
-			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
-					flags);
-		}
-	}
-
-	return 0;
-}
-
-/*
- * internal_modify_qp with circumvention to handle aqp0 properly
- * smi_reset2init indicates if this is an internal reset-to-init call for
- * smi. This flag must always be zero if called from ehca_modify_qp()!
- * This internal function was introduced to avoid recursion in ehca_modify_qp()!
- */
-static int internal_modify_qp(struct ib_qp *ibqp,
-			      struct ib_qp_attr *attr,
-			      int attr_mask, int smi_reset2init)
-{
-	enum ib_qp_state qp_cur_state, qp_new_state;
-	int cnt, qp_attr_idx, ret = 0;
-	enum ib_qp_statetrans statetrans;
-	struct hcp_modify_qp_control_block *mqpcb;
-	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	struct ehca_shca *shca =
-		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
-	u64 update_mask;
-	u64 h_ret;
-	int bad_wqe_cnt = 0;
-	int is_user = 0;
-	int squeue_locked = 0;
-	unsigned long flags = 0;
-
-	/* do query_qp to obtain current attr values */
-	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-	if (!mqpcb) {
-		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
-			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
-				my_qp->ipz_qp_handle,
-				&my_qp->pf,
-				mqpcb, my_qp->galpas.kernel);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%lli",
-			 my_qp, ibqp->qp_num, h_ret);
-		ret = ehca2ib_return_code(h_ret);
-		goto modify_qp_exit1;
-	}
-	if (ibqp->uobject)
-		is_user = 1;
-
-	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
-
-	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
-		ret = -EINVAL;
-		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
-			 "ehca_qp=%p qp_num=%x",
-			 mqpcb->qp_state, my_qp, ibqp->qp_num);
-		goto modify_qp_exit1;
-	}
-	/*
-	 * circumvention to set aqp0 initial state to init
-	 * as expected by IB spec
-	 */
-	if (smi_reset2init == 0 &&
-	    ibqp->qp_type == IB_QPT_SMI &&
-	    qp_cur_state == IB_QPS_RESET &&
-	    (attr_mask & IB_QP_STATE) &&
-	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
-		struct ib_qp_attr smiqp_attr = {
-			.qp_state = IB_QPS_INIT,
-			.port_num = my_qp->init_attr.port_num,
-			.pkey_index = 0,
-			.qkey = 0
-		};
-		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
-			IB_QP_PKEY_INDEX | IB_QP_QKEY;
-		int smirc = internal_modify_qp(
-			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
-		if (smirc) {
-			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
-				 "ehca_modify_qp() rc=%i", smirc);
-			ret = H_PARAMETER;
-			goto modify_qp_exit1;
-		}
-		qp_cur_state = IB_QPS_INIT;
-		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
-	}
-	/* is the transmitted current state equal to the "real" current state? */
-	if ((attr_mask & IB_QP_CUR_STATE) &&
-	    qp_cur_state != attr->cur_qp_state) {
-		ret = -EINVAL;
-		ehca_err(ibqp->device,
-			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
-			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
-			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
-		goto modify_qp_exit1;
-	}
-
-	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
-		 "new qp_state=%x attribute_mask=%x",
-		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
-
-	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
-	if (!smi_reset2init &&
-	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
-				attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
-		ret = -EINVAL;
-		ehca_err(ibqp->device,
-			 "Invalid qp transition new_state=%x cur_state=%x "
-			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
-			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
-		goto modify_qp_exit1;
-	}
-
-	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
-	if (mqpcb->qp_state)
-		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-	else {
-		ret = -EINVAL;
-		ehca_err(ibqp->device, "Invalid new qp state=%x "
-			 "ehca_qp=%p qp_num=%x",
-			 qp_new_state, my_qp, ibqp->qp_num);
-		goto modify_qp_exit1;
-	}
-
-	/* retrieve state transition struct to get req and opt attrs */
-	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
-	if (statetrans < 0) {
-		ret = -EINVAL;
-		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
-			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
-			 "qp_num=%x", qp_cur_state, qp_new_state,
-			 statetrans, my_qp, ibqp->qp_num);
-		goto modify_qp_exit1;
-	}
-
-	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
-
-	if (qp_attr_idx < 0) {
-		ret = qp_attr_idx;
-		ehca_err(ibqp->device,
-			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
-			 ibqp->qp_type, my_qp, ibqp->qp_num);
-		goto modify_qp_exit1;
-	}
-
-	ehca_dbg(ibqp->device,
-		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
-		 my_qp, ibqp->qp_num, statetrans);
-
-	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
-	 * in non-LL UD QPs.
-	 */
-	if ((my_qp->qp_type == IB_QPT_UD) &&
-	    (my_qp->ext_type != EQPT_LLQP) &&
-	    (statetrans == IB_QPST_INIT2RTR) &&
-	    (shca->hw_level >= 0x22)) {
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
-		mqpcb->send_grh_flag = 1;
-	}
-
-	/* sqe -> rts: set purge bit of bad wqe before actual trans */
-	if ((my_qp->qp_type == IB_QPT_UD ||
-	     my_qp->qp_type == IB_QPT_GSI ||
-	     my_qp->qp_type == IB_QPT_SMI) &&
-	    statetrans == IB_QPST_SQE2RTS) {
-		/* mark next free wqe if kernel */
-		if (!ibqp->uobject) {
-			struct ehca_wqe *wqe;
-			/* lock send queue */
-			spin_lock_irqsave(&my_qp->spinlock_s, flags);
-			squeue_locked = 1;
-			/* mark next free wqe */
-			wqe = (struct ehca_wqe *)
-				ipz_qeit_get(&my_qp->ipz_squeue);
-			wqe->optype = wqe->wqef = 0xff;
-			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
-				 ibqp->qp_num, wqe);
-		}
-		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
-		if (ret) {
-			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
-				 "ehca_qp=%p qp_num=%x ret=%i",
-				 my_qp, ibqp->qp_num, ret);
-			goto modify_qp_exit2;
-		}
-	}
-
-	/*
-	 * enable RDMA_Atomic_Control on reset->init for reliable connections;
-	 * this is necessary since gen2 does not provide that flag,
-	 * but pHyp requires it
-	 */
-	if (statetrans == IB_QPST_RESET2INIT &&
-	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
-		mqpcb->rdma_atomic_ctrl = 3;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
-	}
-	/* circumvention: pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
-	if (statetrans == IB_QPST_INIT2RTR &&
-	    (ibqp->qp_type == IB_QPT_UC) &&
-	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
-		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
-	}
-
-	if (attr_mask & IB_QP_PKEY_INDEX) {
-		if (attr->pkey_index >= 16) {
-			ret = -EINVAL;
-			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
-				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
-				 attr->pkey_index, my_qp, ibqp->qp_num);
-			goto modify_qp_exit2;
-		}
-		mqpcb->prim_p_key_idx = attr->pkey_index;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
-	}
-	if (attr_mask & IB_QP_PORT) {
-		struct ehca_sport *sport;
-		struct ehca_qp *aqp1;
-		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
-			ret = -EINVAL;
-			ehca_err(ibqp->device, "Invalid port=%x. "
-				 "ehca_qp=%p qp_num=%x num_ports=%x",
-				 attr->port_num, my_qp, ibqp->qp_num,
-				 shca->num_ports);
-			goto modify_qp_exit2;
-		}
-		sport = &shca->sport[attr->port_num - 1];
-		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
-			/* should not occur */
-			ret = -EFAULT;
-			ehca_err(ibqp->device, "AQP1 was not created for "
-				 "port=%x", attr->port_num);
-			goto modify_qp_exit2;
-		}
-		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
-				    struct ehca_qp, ib_qp);
-		if (ibqp->qp_type != IB_QPT_GSI &&
-		    ibqp->qp_type != IB_QPT_SMI &&
-		    aqp1->mod_qp_parm) {
-			/*
-			 * firmware will reject this modify_qp() because
-			 * port is not activated/initialized fully
-			 */
-			ret = -EFAULT;
-			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
-				  "either port is being activated (try again) "
-				  "or cabling issue", attr->port_num);
-			goto modify_qp_exit2;
-		}
-		mqpcb->prim_phys_port = attr->port_num;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
-	}
-	if (attr_mask & IB_QP_QKEY) {
-		mqpcb->qkey = attr->qkey;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
-	}
-	if (attr_mask & IB_QP_AV) {
-		mqpcb->dlid = attr->ah_attr.dlid;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
-		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
-		mqpcb->service_level = attr->ah_attr.sl;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
-
-		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
-				  attr->ah_attr.static_rate,
-				  &mqpcb->max_static_rate)) {
-			ret = -EINVAL;
-			goto modify_qp_exit2;
-		}
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
-
-		/*
-		 * Always supply the GRH flag, even if it's zero, to give the
-		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
-		 */
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
-
-		/*
-		 * only if GRH is TRUE may we set SOURCE_GID_IDX
-		 * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!!!
-		 */
-		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
-			mqpcb->send_grh_flag = 1;
-
-			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
-			update_mask |=
-				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
-
-			for (cnt = 0; cnt < 16; cnt++)
-				mqpcb->dest_gid.byte[cnt] =
-					attr->ah_attr.grh.dgid.raw[cnt];
-
-			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
-			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
-			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
-			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
-			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
-			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
-			update_mask |=
-				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
-		}
-	}
-
-	if (attr_mask & IB_QP_PATH_MTU) {
-		/* store log2(MTU) */
-		my_qp->mtu_shift = attr->path_mtu + 7;
-		mqpcb->path_mtu = attr->path_mtu;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
-	}
-	if (attr_mask & IB_QP_TIMEOUT) {
-		mqpcb->timeout = attr->timeout;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
-	}
-	if (attr_mask & IB_QP_RETRY_CNT) {
-		mqpcb->retry_count = attr->retry_cnt;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
-	}
-	if (attr_mask & IB_QP_RNR_RETRY) {
-		mqpcb->rnr_retry_count = attr->rnr_retry;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
-	}
-	if (attr_mask & IB_QP_RQ_PSN) {
-		mqpcb->receive_psn = attr->rq_psn;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
-	}
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
-			attr->max_dest_rd_atomic : 2;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
-	}
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
-			attr->max_rd_atomic : 2;
-		update_mask |=
-			EHCA_BMASK_SET
-			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
-	}
-	if (attr_mask & IB_QP_ALT_PATH) {
-		if (attr->alt_port_num < 1
-		    || attr->alt_port_num > shca->num_ports) {
-			ret = -EINVAL;
-			ehca_err(ibqp->device, "Invalid alt_port=%x. "
-				 "ehca_qp=%p qp_num=%x num_ports=%x",
-				 attr->alt_port_num, my_qp, ibqp->qp_num,
-				 shca->num_ports);
-			goto modify_qp_exit2;
-		}
-		mqpcb->alt_phys_port = attr->alt_port_num;
-
-		if (attr->alt_pkey_index >= 16) {
-			ret = -EINVAL;
-			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
-				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
-				 attr->alt_pkey_index, my_qp, ibqp->qp_num);
-			goto modify_qp_exit2;
-		}
-		mqpcb->alt_p_key_idx = attr->alt_pkey_index;
-
-		mqpcb->timeout_al = attr->alt_timeout;
-		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
-		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
-		mqpcb->service_level_al = attr->alt_ah_attr.sl;
-
-		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
-				  attr->alt_ah_attr.static_rate,
-				  &mqpcb->max_static_rate_al)) {
-			ret = -EINVAL;
-			goto modify_qp_exit2;
-		}
-
-		/* OpenIB doesn't support alternate retry counts - copy them */
-		mqpcb->retry_count_al = mqpcb->retry_count;
-		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
-
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
-
-		/*
-		 * Always supply the GRH flag, even if it's zero, to give the
-		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
-		 */
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
-
-		/*
-		 * only if GRH is TRUE may we set SOURCE_GID_IDX
-		 * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!!!
-		 */
-		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
-			mqpcb->send_grh_flag_al = 1;
-
-			for (cnt = 0; cnt < 16; cnt++)
-				mqpcb->dest_gid_al.byte[cnt] =
-					attr->alt_ah_attr.grh.dgid.raw[cnt];
-			mqpcb->source_gid_idx_al =
-				attr->alt_ah_attr.grh.sgid_index;
-			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
-			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
-			mqpcb->traffic_class_al =
-				attr->alt_ah_attr.grh.traffic_class;
-
-			update_mask |=
-				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
-				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
-				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
-				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
-				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
-		}
-	}
-
-	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
-	}
-
-	if (attr_mask & IB_QP_SQ_PSN) {
-		mqpcb->send_psn = attr->sq_psn;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
-	}
-
-	if (attr_mask & IB_QP_DEST_QPN) {
-		mqpcb->dest_qp_nr = attr->dest_qp_num;
-		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
-	}
-
-	if (attr_mask & IB_QP_PATH_MIG_STATE) {
-		if (attr->path_mig_state != IB_MIG_REARM
-		    && attr->path_mig_state != IB_MIG_MIGRATED) {
-			ret = -EINVAL;
-			ehca_err(ibqp->device, "Invalid mig_state=%x",
-				 attr->path_mig_state);
-			goto modify_qp_exit2;
-		}
-		mqpcb->path_migration_state = attr->path_mig_state + 1;
-		if (attr->path_mig_state == IB_MIG_REARM)
-			my_qp->mig_armed = 1;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
-	}
-
-	if (attr_mask & IB_QP_CAP) {
-		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
-		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
-		/* no support for max_send/recv_sge yet */
-	}
-
-	if (ehca_debug_level >= 2)
-		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
-
-	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
-				 my_qp->ipz_qp_handle,
-				 &my_qp->pf,
-				 update_mask,
-				 mqpcb, my_qp->galpas.kernel);
-
-	if (h_ret != H_SUCCESS) {
-		ret = ehca2ib_return_code(h_ret);
-		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
-			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
-		goto modify_qp_exit2;
-	}
-
-	if ((my_qp->qp_type == IB_QPT_UD ||
-	     my_qp->qp_type == IB_QPT_GSI ||
-	     my_qp->qp_type == IB_QPT_SMI) &&
-	    statetrans == IB_QPST_SQE2RTS) {
-		/* ring the doorbell to reprocess wqes */
-		iosync(); /* serialize GAL register access */
-		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
-		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
-	}
-
-	if (statetrans == IB_QPST_RESET2INIT ||
-	    statetrans == IB_QPST_INIT2INIT) {
-		mqpcb->qp_enable = 1;
-		mqpcb->qp_state = EHCA_QPS_INIT;
-		update_mask = 0;
-		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
-
-		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
-					 my_qp->ipz_qp_handle,
-					 &my_qp->pf,
-					 update_mask,
-					 mqpcb,
-					 my_qp->galpas.kernel);
-
-		if (h_ret != H_SUCCESS) {
-			ret = ehca2ib_return_code(h_ret);
-			ehca_err(ibqp->device, "ENABLE in context of "
-				 "RESET_2_INIT failed! Maybe you didn't get "
-				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
-				 h_ret, my_qp, ibqp->qp_num);
-			goto modify_qp_exit2;
-		}
-	}
-	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
-	    && !is_user) {
-		ret = check_for_left_cqes(my_qp, shca);
-		if (ret)
-			goto modify_qp_exit2;
-	}
-
-	if (statetrans == IB_QPST_ANY2RESET) {
-		ipz_qeit_reset(&my_qp->ipz_rqueue);
-		ipz_qeit_reset(&my_qp->ipz_squeue);
-
-		if (qp_cur_state == IB_QPS_ERR && !is_user) {
-			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
-			if (HAS_RQ(my_qp))
-				del_from_err_list(my_qp->recv_cq,
-						  &my_qp->rq_err_node);
-		}
-		if (!is_user)
-			reset_queue_map(&my_qp->sq_map);
-
-		if (HAS_RQ(my_qp) && !is_user)
-			reset_queue_map(&my_qp->rq_map);
-	}
-
-	if (attr_mask & IB_QP_QKEY)
-		my_qp->qkey = attr->qkey;
-
-modify_qp_exit2:
-	if (squeue_locked) { /* this means: sqe -> rts */
-		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-		my_qp->sqerr_purgeflag = 1;
-	}
-
-modify_qp_exit1:
-	ehca_free_fw_ctrlblock(mqpcb);
-
-	return ret;
-}
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-		   struct ib_udata *udata)
-{
-	int ret = 0;
-
-	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
-					      ib_device);
-	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
-	/* The if-block below caches the qp_attr to be modified for GSI and SMI
-	 * qps during initialization by ib_mad. When the respective port is
-	 * activated, i.e. we receive a PORT_ACTIVE event, we replay the cached
-	 * sequence of modify calls, see ehca_recover_sqp() below.
-	 * Why this is required:
-	 * 1) If only one port is connected, older code required that it be
-	 *    port one and that the module option nr_ports=1 be given by the
-	 *    user, which is very inconvenient for the end user.
-	 * 2) Firmware accepts modify_qp() only after the respective port has
-	 *    become active. Older code had a 30-second wait loop in
-	 *    create_qp()/define_aqp1(), which is not appropriate in practice.
-	 *    This code removes that wait loop, see define_aqp1(), and always
-	 *    reports all ports to ib_mad and to users. Only activated ports
-	 *    will then be usable.
-	 */
-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
-		int port = my_qp->init_attr.port_num;
-		struct ehca_sport *sport = &shca->sport[port - 1];
-		unsigned long flags;
-		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-		/* cache qp_attr only during init */
-		if (my_qp->mod_qp_parm) {
-			struct ehca_mod_qp_parm *p;
-			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
-				ehca_err(&shca->ib_device,
-					 "mod_qp_parm overflow state=%x port=%x"
-					 " type=%x", attr->qp_state,
-					 my_qp->init_attr.port_num,
-					 ibqp->qp_type);
-				spin_unlock_irqrestore(&sport->mod_sqp_lock,
-						       flags);
-				return -EINVAL;
-			}
-			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
-			p->mask = attr_mask;
-			p->attr = *attr;
-			my_qp->mod_qp_parm_idx++;
-			ehca_dbg(&shca->ib_device,
-				 "Saved qp_attr for state=%x port=%x type=%x",
-				 attr->qp_state, my_qp->init_attr.port_num,
-				 ibqp->qp_type);
-			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-			goto out;
-		}
-		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-	}
-
-	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
-
-out:
-	if ((ret == 0) && (attr_mask & IB_QP_STATE))
-		my_qp->state = attr->qp_state;
-
-	return ret;
-}
-
-void ehca_recover_sqp(struct ib_qp *sqp)
-{
-	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
-	int port = my_sqp->init_attr.port_num;
-	struct ib_qp_attr attr;
-	struct ehca_mod_qp_parm *qp_parm;
-	int i, qp_parm_idx, ret;
-	unsigned long flags, wr_cnt;
-
-	if (!my_sqp->mod_qp_parm)
-		return;
-	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
-
-	qp_parm = my_sqp->mod_qp_parm;
-	qp_parm_idx = my_sqp->mod_qp_parm_idx;
-	for (i = 0; i < qp_parm_idx; i++) {
-		attr = qp_parm[i].attr;
-		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
-		if (ret) {
-			ehca_err(sqp->device, "Could not modify SQP port=%x "
-				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
-			goto free_qp_parm;
-		}
-		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
-			 port, sqp->qp_num, attr.qp_state);
-	}
-
-	/* re-trigger posted recv wrs */
-	wr_cnt =  my_sqp->ipz_rqueue.current_q_offset /
-		my_sqp->ipz_rqueue.qe_size;
-	if (wr_cnt) {
-		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
-		hipz_update_rqa(my_sqp, wr_cnt);
-		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
-		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
-			 port, sqp->qp_num, wr_cnt);
-	}
-
-free_qp_parm:
-	kfree(qp_parm);
-	/* this prevents subsequent calls to modify_qp() from caching qp_attr */
-	my_sqp->mod_qp_parm = NULL;
-}
-
-int ehca_query_qp(struct ib_qp *qp,
-		  struct ib_qp_attr *qp_attr,
-		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
-{
-	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
-					      ib_device);
-	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-	struct hcp_modify_qp_control_block *qpcb;
-	int cnt, ret = 0;
-	u64 h_ret;
-
-	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
-		ehca_err(qp->device, "Invalid attribute mask "
-			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
-			 my_qp, qp->qp_num, qp_attr_mask);
-		return -EINVAL;
-	}
-
-	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!qpcb) {
-		ehca_err(qp->device, "Out of memory for qpcb "
-			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_qp(adapter_handle,
-				my_qp->ipz_qp_handle,
-				&my_qp->pf,
-				qpcb, my_qp->galpas.kernel);
-
-	if (h_ret != H_SUCCESS) {
-		ret = ehca2ib_return_code(h_ret);
-		ehca_err(qp->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%lli",
-			 my_qp, qp->qp_num, h_ret);
-		goto query_qp_exit1;
-	}
-
-	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
-	qp_attr->qp_state = qp_attr->cur_qp_state;
-
-	if (qp_attr->cur_qp_state == -EINVAL) {
-		ret = -EINVAL;
-		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
-			 "ehca_qp=%p qp_num=%x",
-			 qpcb->qp_state, my_qp, qp->qp_num);
-		goto query_qp_exit1;
-	}
-
-	if (qp_attr->qp_state == IB_QPS_SQD)
-		qp_attr->sq_draining = 1;
-
-	qp_attr->qkey = qpcb->qkey;
-	qp_attr->path_mtu = qpcb->path_mtu;
-	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
-	qp_attr->rq_psn = qpcb->receive_psn;
-	qp_attr->sq_psn = qpcb->send_psn;
-	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
-	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
-	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
-	/* UD_AV CIRCUMVENTION */
-	if (my_qp->qp_type == IB_QPT_UD) {
-		qp_attr->cap.max_send_sge =
-			qpcb->actual_nr_sges_in_sq_wqe - 2;
-		qp_attr->cap.max_recv_sge =
-			qpcb->actual_nr_sges_in_rq_wqe - 2;
-	} else {
-		qp_attr->cap.max_send_sge =
-			qpcb->actual_nr_sges_in_sq_wqe;
-		qp_attr->cap.max_recv_sge =
-			qpcb->actual_nr_sges_in_rq_wqe;
-	}
-
-	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
-	qp_attr->dest_qp_num = qpcb->dest_qp_nr;
-
-	qp_attr->pkey_index = qpcb->prim_p_key_idx;
-	qp_attr->port_num = qpcb->prim_phys_port;
-	qp_attr->timeout = qpcb->timeout;
-	qp_attr->retry_cnt = qpcb->retry_count;
-	qp_attr->rnr_retry = qpcb->rnr_retry_count;
-
-	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
-	qp_attr->alt_port_num = qpcb->alt_phys_port;
-	qp_attr->alt_timeout = qpcb->timeout_al;
-
-	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
-	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
-
-	/* primary av */
-	qp_attr->ah_attr.sl = qpcb->service_level;
-
-	if (qpcb->send_grh_flag) {
-		qp_attr->ah_attr.ah_flags = IB_AH_GRH;
-	}
-
-	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
-	qp_attr->ah_attr.dlid = qpcb->dlid;
-	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
-	qp_attr->ah_attr.port_num = qp_attr->port_num;
-
-	/* primary GRH */
-	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
-	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
-	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
-	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
-
-	for (cnt = 0; cnt < 16; cnt++)
-		qp_attr->ah_attr.grh.dgid.raw[cnt] =
-			qpcb->dest_gid.byte[cnt];
-
-	/* alternate AV */
-	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
-	if (qpcb->send_grh_flag_al) {
-		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
-	}
-
-	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
-	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
-	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
-
-	/* alternate GRH */
-	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
-	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
-	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
-	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
-
-	for (cnt = 0; cnt < 16; cnt++)
-		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
-			qpcb->dest_gid_al.byte[cnt];
-
-	/* return init attributes given in ehca_create_qp */
-	if (qp_init_attr)
-		*qp_init_attr = my_qp->init_attr;
-
-	if (ehca_debug_level >= 2)
-		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
-
-query_qp_exit1:
-	ehca_free_fw_ctrlblock(qpcb);
-
-	return ret;
-}
-
-int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
-{
-	struct ehca_qp *my_qp =
-		container_of(ibsrq, struct ehca_qp, ib_srq);
-	struct ehca_shca *shca =
-		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
-	struct hcp_modify_qp_control_block *mqpcb;
-	u64 update_mask;
-	u64 h_ret;
-	int ret = 0;
-
-	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!mqpcb) {
-		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
-			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
-		return -ENOMEM;
-	}
-
-	update_mask = 0;
-	if (attr_mask & IB_SRQ_LIMIT) {
-		attr_mask &= ~IB_SRQ_LIMIT;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
-			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-		mqpcb->curr_srq_limit = attr->srq_limit;
-		mqpcb->qp_aff_asyn_ev_log_reg =
-			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
-	}
-
-	/* by now, all bits in attr_mask should have been cleared */
-	if (attr_mask) {
-		ehca_err(ibsrq->device, "invalid attribute mask bits set  "
-			 "attr_mask=%x", attr_mask);
-		ret = -EINVAL;
-		goto modify_srq_exit0;
-	}
-
-	if (ehca_debug_level >= 2)
-		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
-	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
-				 NULL, update_mask, mqpcb,
-				 my_qp->galpas.kernel);
-
-	if (h_ret != H_SUCCESS) {
-		ret = ehca2ib_return_code(h_ret);
-		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
-			 "ehca_qp=%p qp_num=%x",
-			 h_ret, my_qp, my_qp->real_qp_num);
-	}
-
-modify_srq_exit0:
-	ehca_free_fw_ctrlblock(mqpcb);
-
-	return ret;
-}
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
-{
-	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
-	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
-					      ib_device);
-	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-	struct hcp_modify_qp_control_block *qpcb;
-	int ret = 0;
-	u64 h_ret;
-
-	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-	if (!qpcb) {
-		ehca_err(srq->device, "Out of memory for qpcb "
-			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
-		return -ENOMEM;
-	}
-
-	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
-				NULL, qpcb, my_qp->galpas.kernel);
-
-	if (h_ret != H_SUCCESS) {
-		ret = ehca2ib_return_code(h_ret);
-		ehca_err(srq->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%lli",
-			 my_qp, my_qp->real_qp_num, h_ret);
-		goto query_srq_exit1;
-	}
-
-	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
-	srq_attr->max_sge = 3;
-	srq_attr->srq_limit = qpcb->curr_srq_limit;
-
-	if (ehca_debug_level >= 2)
-		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
-query_srq_exit1:
-	ehca_free_fw_ctrlblock(qpcb);
-
-	return ret;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-			       struct ib_uobject *uobject)
-{
-	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
-	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
-					     ib_pd);
-	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
-	u32 qp_num = my_qp->real_qp_num;
-	int ret;
-	u64 h_ret;
-	u8 port_num;
-	int is_user = 0;
-	enum ib_qp_type	qp_type;
-	unsigned long flags;
-
-	if (uobject) {
-		is_user = 1;
-		if (my_qp->mm_count_galpa ||
-		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
-			ehca_err(dev, "Resources still referenced in "
-				 "user space qp_num=%x", qp_num);
-			return -EINVAL;
-		}
-	}
-
-	if (my_qp->send_cq) {
-		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
-		if (ret) {
-			ehca_err(dev, "Couldn't unassign qp from "
-				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
-				 qp_num, my_qp->send_cq->cq_number);
-			return ret;
-		}
-	}
-
-	write_lock_irqsave(&ehca_qp_idr_lock, flags);
-	idr_remove(&ehca_qp_idr, my_qp->token);
-	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-	/*
-	 * SRQs will never get into an error list and do not have a recv_cq,
-	 * so we need to skip them here.
-	 */
-	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
-		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
-
-	if (HAS_SQ(my_qp) && !is_user)
-		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
-	/* now wait until all pending events have completed */
-	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
-
-	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
-	if (h_ret != H_SUCCESS) {
-		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
-			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
-		return ehca2ib_return_code(h_ret);
-	}
-
-	port_num = my_qp->init_attr.port_num;
-	qp_type  = my_qp->init_attr.qp_type;
-
-	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-		kfree(my_qp->mod_qp_parm);
-		my_qp->mod_qp_parm = NULL;
-		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
-		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-	}
-
-	/* no support for IB_QPT_SMI yet */
-	if (qp_type == IB_QPT_GSI) {
-		struct ib_event event;
-		ehca_info(dev, "device %s: port %x is inactive.",
-				shca->ib_device.name, port_num);
-		event.device = &shca->ib_device;
-		event.event = IB_EVENT_PORT_ERR;
-		event.element.port_num = port_num;
-		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
-		ib_dispatch_event(&event);
-	}
-
-	if (HAS_RQ(my_qp)) {
-		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-		if (!is_user)
-			vfree(my_qp->rq_map.map);
-	}
-	if (HAS_SQ(my_qp)) {
-		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-		if (!is_user)
-			vfree(my_qp->sq_map.map);
-	}
-	kmem_cache_free(qp_cache, my_qp);
-	atomic_dec(&shca->num_qps);
-	return 0;
-}
-
-int ehca_destroy_qp(struct ib_qp *qp)
-{
-	return internal_destroy_qp(qp->device,
-				   container_of(qp, struct ehca_qp, ib_qp),
-				   qp->uobject);
-}
-
-int ehca_destroy_srq(struct ib_srq *srq)
-{
-	return internal_destroy_qp(srq->device,
-				   container_of(srq, struct ehca_qp, ib_srq),
-				   srq->uobject);
-}
-
-int ehca_init_qp_cache(void)
-{
-	qp_cache = kmem_cache_create("ehca_cache_qp",
-				     sizeof(struct ehca_qp), 0,
-				     SLAB_HWCACHE_ALIGN,
-				     NULL);
-	if (!qp_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void ehca_cleanup_qp_cache(void)
-{
-	kmem_cache_destroy(qp_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
deleted file mode 100644
index 11813b8..0000000
--- a/drivers/staging/rdma/ehca/ehca_reqs.c
+++ /dev/null
@@ -1,953 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  post_send/recv, poll_cq, req_notify
- *
- *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-/* in RC traffic, insert an empty RDMA READ every this many packets */
-#define ACK_CIRC_THRESHOLD 2000000
-
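-/*
- * The low-order QMAP_IDX_MASK bits of a WQE's work_request_id carry the
- * queue map index; the application's original low bits are saved in the
- * queue map entry and restored when the completion is reported.
- */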
-static u64 replace_wr_id(u64 wr_id, u16 idx)
-{
-	u64 ret;
-
-	ret = wr_id & ~QMAP_IDX_MASK;
-	ret |= idx & QMAP_IDX_MASK;
-
-	return ret;
-}
-
-static u16 get_app_wr_id(u64 wr_id)
-{
-	return wr_id & QMAP_IDX_MASK;
-}
-
-static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
-				  struct ehca_wqe *wqe_p,
-				  struct ib_recv_wr *recv_wr,
-				  u32 rq_map_idx)
-{
-	u8 cnt_ds;
-	if (unlikely((recv_wr->num_sge < 0) ||
-		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
-		ehca_gen_err("Invalid number of WQE SGE. "
-			 "num_sge=%x max_nr_of_sg=%x",
-			 recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
-		return -EINVAL; /* invalid SG list length */
-	}
-
-	/* clear wqe header until sglist */
-	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
-	wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
-	wqe_p->nr_of_data_seg = recv_wr->num_sge;
-
-	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
-		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
-			recv_wr->sg_list[cnt_ds].addr;
-		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
-			recv_wr->sg_list[cnt_ds].lkey;
-		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
-			recv_wr->sg_list[cnt_ds].length;
-	}
-
-	if (ehca_debug_level >= 3) {
-		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
-			     ipz_rqueue);
-		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
-	}
-
-	return 0;
-}
-
-#if defined(DEBUG_GSI_SEND_WR)
-
-/* need ib_mad struct */
-#include <rdma/ib_mad.h>
-
-static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
-{
-	int idx = 0;
-	int j;
-	while (ud_wr) {
-		struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr;
-		struct ib_sge *sge = ud_wr->wr.sg_list;
-		ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
-			     "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
-			     ud_wr->wr.num_sge, ud_wr->wr.send_flags,
-			     ud_wr->wr.opcode);
-		if (mad_hdr) {
-			ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
-				     "mgmt_class=%x class_version=%x method=%x "
-				     "status=%x class_specific=%x tid=%lx "
-				     "attr_id=%x resv=%x attr_mod=%x",
-				     idx, mad_hdr->base_version,
-				     mad_hdr->mgmt_class,
-				     mad_hdr->class_version, mad_hdr->method,
-				     mad_hdr->status, mad_hdr->class_specific,
-				     mad_hdr->tid, mad_hdr->attr_id,
-				     mad_hdr->resv,
-				     mad_hdr->attr_mod);
-		}
-		for (j = 0; j < ud_wr->wr.num_sge; j++) {
-			u8 *data = __va(sge->addr);
-			ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
-				     "lkey=%x",
-				     idx, j, data, sge->length, sge->lkey);
-			/* assume length is n*16 */
-			ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
-				 idx, j);
-			sge++;
-		} /* eof for j */
-		idx++;
-		ud_wr = ud_wr(ud_wr->wr.next);
-	} /* eof while ud_wr */
-}
-
-#endif /* DEBUG_GSI_SEND_WR */
-
-static inline int ehca_write_swqe(struct ehca_qp *qp,
-				  struct ehca_wqe *wqe_p,
-				  struct ib_send_wr *send_wr,
-				  u32 sq_map_idx,
-				  int hidden)
-{
-	u32 idx;
-	u64 dma_length;
-	struct ehca_av *my_av;
-	u32 remote_qkey;
-	struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
-
-	if (unlikely((send_wr->num_sge < 0) ||
-		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
-		ehca_gen_err("Invalid number of WQE SGE. "
-			 "num_sge=%x max_nr_of_sg=%x",
-			 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
-		return -EINVAL; /* invalid SG list length */
-	}
-
-	/* clear wqe header until sglist */
-	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
-	wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
-
-	qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
-	qmap_entry->reported = 0;
-	qmap_entry->cqe_req = 0;
-
-	switch (send_wr->opcode) {
-	case IB_WR_SEND:
-	case IB_WR_SEND_WITH_IMM:
-		wqe_p->optype = WQE_OPTYPE_SEND;
-		break;
-	case IB_WR_RDMA_WRITE:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
-		break;
-	case IB_WR_RDMA_READ:
-		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
-		break;
-	default:
-		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
-		return -EINVAL; /* invalid opcode */
-	}
-
-	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
-
-	wqe_p->wr_flag = 0;
-
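-	/*
-	 * Hidden WQEs (e.g. the empty RDMA READ used for unsolicited ack
-	 * circumvention) never request a completion.
-	 */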
-	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
-	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-	    && !hidden) {
-		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
-		qmap_entry->cqe_req = 1;
-	}
-
-	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
-	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
-		/* this might not work as long as HW does not support it */
-		wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
-		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
-	}
-
-	wqe_p->nr_of_data_seg = send_wr->num_sge;
-
-	switch (qp->qp_type) {
-	case IB_QPT_SMI:
-	case IB_QPT_GSI:
-		/* no break is intentional here */
-	case IB_QPT_UD:
-		/* IB 1.2 spec C10-15 compliance */
-		remote_qkey = ud_wr(send_wr)->remote_qkey;
-		if (remote_qkey & 0x80000000)
-			remote_qkey = qp->qkey;
-
-		wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
-		wqe_p->local_ee_context_qkey = remote_qkey;
-		if (unlikely(!ud_wr(send_wr)->ah)) {
-			ehca_gen_err("ud_wr(send_wr)->ah is NULL. qp=%p", qp);
-			return -EINVAL;
-		}
-		if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
-			ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
-			return -EINVAL;
-		}
-		my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
-		wqe_p->u.ud_av.ud_av = my_av->av;
-
-		/*
-		 * omitted check of IB_SEND_INLINE
-		 * since HW does not support it
-		 */
-		for (idx = 0; idx < send_wr->num_sge; idx++) {
-			wqe_p->u.ud_av.sg_list[idx].vaddr =
-				send_wr->sg_list[idx].addr;
-			wqe_p->u.ud_av.sg_list[idx].lkey =
-				send_wr->sg_list[idx].lkey;
-			wqe_p->u.ud_av.sg_list[idx].length =
-				send_wr->sg_list[idx].length;
-		} /* eof for idx */
-		if (qp->qp_type == IB_QPT_SMI ||
-		    qp->qp_type == IB_QPT_GSI)
-			wqe_p->u.ud_av.ud_av.pmtu = 1;
-		if (qp->qp_type == IB_QPT_GSI) {
-			wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
-#ifdef DEBUG_GSI_SEND_WR
-			trace_ud_wr(ud_wr(send_wr));
-#endif /* DEBUG_GSI_SEND_WR */
-		}
-		break;
-
-	case IB_QPT_UC:
-		if (send_wr->send_flags & IB_SEND_FENCE)
-			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
-		/* no break is intentional here */
-	case IB_QPT_RC:
-		/* TODO: atomic not implemented */
-		wqe_p->u.nud.remote_virtual_address =
-			rdma_wr(send_wr)->remote_addr;
-		wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
-
-		/*
-		 * omitted checking of IB_SEND_INLINE
-		 * since HW does not support it
-		 */
-		dma_length = 0;
-		for (idx = 0; idx < send_wr->num_sge; idx++) {
-			wqe_p->u.nud.sg_list[idx].vaddr =
-				send_wr->sg_list[idx].addr;
-			wqe_p->u.nud.sg_list[idx].lkey =
-				send_wr->sg_list[idx].lkey;
-			wqe_p->u.nud.sg_list[idx].length =
-				send_wr->sg_list[idx].length;
-			dma_length += send_wr->sg_list[idx].length;
-		} /* eof idx */
-		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
-
-		/* unsolicited ack circumvention */
-		if (send_wr->opcode == IB_WR_RDMA_READ) {
-			/* on RDMA read, switch on and reset counters */
-			qp->message_count = qp->packet_count = 0;
-			qp->unsol_ack_circ = 1;
-		} else
-			/* else estimate #packets */
-			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
-
-		break;
-
-	default:
-		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
-		return -EINVAL;
-	}
-
-	if (ehca_debug_level >= 3) {
-		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
-		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
-	}
-	return 0;
-}
-
-/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
-static inline void map_ib_wc_status(u32 cqe_status,
-				    enum ib_wc_status *wc_status)
-{
-	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
-		switch (cqe_status & 0x3F) {
-		case 0x01:
-		case 0x21:
-			*wc_status = IB_WC_LOC_LEN_ERR;
-			break;
-		case 0x02:
-		case 0x22:
-			*wc_status = IB_WC_LOC_QP_OP_ERR;
-			break;
-		case 0x03:
-		case 0x23:
-			*wc_status = IB_WC_LOC_EEC_OP_ERR;
-			break;
-		case 0x04:
-		case 0x24:
-			*wc_status = IB_WC_LOC_PROT_ERR;
-			break;
-		case 0x05:
-		case 0x25:
-			*wc_status = IB_WC_WR_FLUSH_ERR;
-			break;
-		case 0x06:
-			*wc_status = IB_WC_MW_BIND_ERR;
-			break;
-		case 0x07: /* remote error - look into bits 20:24 */
-			switch ((cqe_status
-				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
-			case 0x0:
-				/*
-				 * PSN Sequence Error!
-				 * couldn't find a matching status!
-				 */
-				*wc_status = IB_WC_GENERAL_ERR;
-				break;
-			case 0x1:
-				*wc_status = IB_WC_REM_INV_REQ_ERR;
-				break;
-			case 0x2:
-				*wc_status = IB_WC_REM_ACCESS_ERR;
-				break;
-			case 0x3:
-				*wc_status = IB_WC_REM_OP_ERR;
-				break;
-			case 0x4:
-				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
-				break;
-			}
-			break;
-		case 0x08:
-			*wc_status = IB_WC_RETRY_EXC_ERR;
-			break;
-		case 0x09:
-			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
-			break;
-		case 0x0A:
-		case 0x2D:
-			*wc_status = IB_WC_REM_ABORT_ERR;
-			break;
-		case 0x0B:
-		case 0x2E:
-			*wc_status = IB_WC_INV_EECN_ERR;
-			break;
-		case 0x0C:
-		case 0x2F:
-			*wc_status = IB_WC_INV_EEC_STATE_ERR;
-			break;
-		case 0x0D:
-			*wc_status = IB_WC_BAD_RESP_ERR;
-			break;
-		case 0x10:
-			/* WQE purged */
-			*wc_status = IB_WC_WR_FLUSH_ERR;
-			break;
-		default:
-			*wc_status = IB_WC_FATAL_ERR;
-
-		}
-	} else
-		*wc_status = IB_WC_SUCCESS;
-}
-
-static inline int post_one_send(struct ehca_qp *my_qp,
-			 struct ib_send_wr *cur_send_wr,
-			 int hidden)
-{
-	struct ehca_wqe *wqe_p;
-	int ret;
-	u32 sq_map_idx;
-	u64 start_offset = my_qp->ipz_squeue.current_q_offset;
-
-	/* get pointer next to free WQE */
-	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
-	if (unlikely(!wqe_p)) {
-		/* too many posted work requests: queue overflow */
-		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
-			 "qp_num=%x", my_qp->ib_qp.qp_num);
-		return -ENOMEM;
-	}
-
-	/*
-	 * Get the index of the WQE in the send queue. The same index is used
-	 * for writing into the sq_map.
-	 */
-	sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
-
-	/* write a SEND WQE into the QUEUE */
-	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
-	/*
-	 * if something failed,
-	 * reset the free entry pointer to the start value
-	 */
-	if (unlikely(ret)) {
-		my_qp->ipz_squeue.current_q_offset = start_offset;
-		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
-			 "qp_num=%x", my_qp->ib_qp.qp_num);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int ehca_post_send(struct ib_qp *qp,
-		   struct ib_send_wr *send_wr,
-		   struct ib_send_wr **bad_send_wr)
-{
-	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-	int wqe_cnt = 0;
-	int ret = 0;
-	unsigned long flags;
-
-	/* Reject WR if QP is in RESET, INIT or RTR state */
-	if (unlikely(my_qp->state < IB_QPS_RTS)) {
-		ehca_err(qp->device, "Invalid QP state  qp_state=%d qpn=%x",
-			 my_qp->state, qp->qp_num);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* LOCK the QUEUE */
-	spin_lock_irqsave(&my_qp->spinlock_s, flags);
-
-	/* Send an empty extra RDMA read if:
-	 *  1) there has been an RDMA read on this connection before
-	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
-	 *  3) we can be sure that any previous extra RDMA read has been
-	 *     processed so we don't overflow the SQ
-	 */
-	if (unlikely(my_qp->unsol_ack_circ &&
-		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
-		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
-		/* insert an empty RDMA READ to fix up the remote QP state */
-		struct ib_send_wr circ_wr;
-		memset(&circ_wr, 0, sizeof(circ_wr));
-		circ_wr.opcode = IB_WR_RDMA_READ;
-		post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
-		wqe_cnt++;
-		ehca_dbg(qp->device, "posted circ wr  qp_num=%x", qp->qp_num);
-		my_qp->message_count = my_qp->packet_count = 0;
-	}
-
-	/* loop processes list of send reqs */
-	while (send_wr) {
-		ret = post_one_send(my_qp, send_wr, 0);
-		if (unlikely(ret)) {
-			goto post_send_exit0;
-		}
-		wqe_cnt++;
-		send_wr = send_wr->next;
-	}
-
-post_send_exit0:
-	iosync(); /* serialize GAL register access */
-	hipz_update_sqa(my_qp, wqe_cnt);
-	if (unlikely(ret || ehca_debug_level >= 2))
-		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
-			 my_qp, qp->qp_num, wqe_cnt, ret);
-	my_qp->message_count += wqe_cnt;
-	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-
-out:
-	if (ret)
-		*bad_send_wr = send_wr;
-	return ret;
-}
-
-static int internal_post_recv(struct ehca_qp *my_qp,
-			      struct ib_device *dev,
-			      struct ib_recv_wr *recv_wr,
-			      struct ib_recv_wr **bad_recv_wr)
-{
-	struct ehca_wqe *wqe_p;
-	int wqe_cnt = 0;
-	int ret = 0;
-	u32 rq_map_idx;
-	unsigned long flags;
-	struct ehca_qmap_entry *qmap_entry;
-
-	if (unlikely(!HAS_RQ(my_qp))) {
-		ehca_err(dev, "QP has no RQ  ehca_qp=%p qp_num=%x ext_type=%d",
-			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
-		ret = -ENODEV;
-		goto out;
-	}
-
-	/* LOCK the QUEUE */
-	spin_lock_irqsave(&my_qp->spinlock_r, flags);
-
-	/* loop processes list of recv reqs */
-	while (recv_wr) {
-		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
-		/* get pointer next to free WQE */
-		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
-		if (unlikely(!wqe_p)) {
-			/* too many posted work requests: queue overflow */
-			ret = -ENOMEM;
-			ehca_err(dev, "Too many posted WQEs "
-				"qp_num=%x", my_qp->real_qp_num);
-			goto post_recv_exit0;
-		}
-		/*
-		 * Get the index of the WQE in the recv queue. The same index
-		 * is used for writing into the rq_map.
-		 */
-		rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
-
-		/* write a RECV WQE into the QUEUE */
-		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
-				rq_map_idx);
-		/*
-		 * if something failed,
-		 * reset the free entry pointer to the start value
-		 */
-		if (unlikely(ret)) {
-			my_qp->ipz_rqueue.current_q_offset = start_offset;
-			ret = -EINVAL;
-			ehca_err(dev, "Could not write WQE "
-				"qp_num=%x", my_qp->real_qp_num);
-			goto post_recv_exit0;
-		}
-
-		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
-		qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
-		qmap_entry->reported = 0;
-		qmap_entry->cqe_req = 1;
-
-		wqe_cnt++;
-		recv_wr = recv_wr->next;
-	} /* eof for recv_wr */
-
-post_recv_exit0:
-	iosync(); /* serialize GAL register access */
-	hipz_update_rqa(my_qp, wqe_cnt);
-	if (unlikely(ret || ehca_debug_level >= 2))
-	    ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
-		     my_qp, my_qp->real_qp_num, wqe_cnt, ret);
-	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
-
-out:
-	if (ret)
-		*bad_recv_wr = recv_wr;
-
-	return ret;
-}
-
-int ehca_post_recv(struct ib_qp *qp,
-		   struct ib_recv_wr *recv_wr,
-		   struct ib_recv_wr **bad_recv_wr)
-{
-	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-
-	/* Reject WR if QP is in RESET state */
-	if (unlikely(my_qp->state == IB_QPS_RESET)) {
-		ehca_err(qp->device, "Invalid QP state  qp_state=%d qpn=%x",
-			 my_qp->state, qp->qp_num);
-		*bad_recv_wr = recv_wr;
-		return -EINVAL;
-	}
-
-	return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
-}
-
-int ehca_post_srq_recv(struct ib_srq *srq,
-		       struct ib_recv_wr *recv_wr,
-		       struct ib_recv_wr **bad_recv_wr)
-{
-	return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
-				  srq->device, recv_wr, bad_recv_wr);
-}
-
-/*
- * ib_wc_opcode table converts ehca wc opcode to ib
- * Since we use zero to indicate invalid opcode, the actual ib opcode must
- * be decremented!!!
- */
-static const u8 ib_wc_opcode[255] = {
-	[0x01] = IB_WC_RECV+1,
-	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
-	[0x08] = IB_WC_FETCH_ADD+1,
-	[0x10] = IB_WC_COMP_SWAP+1,
-	[0x20] = IB_WC_RDMA_WRITE+1,
-	[0x40] = IB_WC_RDMA_READ+1,
-	[0x80] = IB_WC_SEND+1
-};
-
-/* internal function to poll one entry of cq */
-static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
-{
-	int ret = 0, qmap_tail_idx;
-	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	struct ehca_cqe *cqe;
-	struct ehca_qp *my_qp;
-	struct ehca_qmap_entry *qmap_entry;
-	struct ehca_queue_map *qmap;
-	int cqe_count = 0, is_error;
-
-repoll:
-	cqe = (struct ehca_cqe *)
-		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
-	if (!cqe) {
-		ret = -EAGAIN;
-		if (ehca_debug_level >= 3)
-			ehca_dbg(cq->device, "Completion queue is empty  "
-				 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
-		goto poll_cq_one_exit0;
-	}
-
-	/* prevents loads being reordered across this point */
-	rmb();
-
-	cqe_count++;
-	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
-		struct ehca_qp *qp;
-		int purgeflag;
-		unsigned long flags;
-
-		qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
-		if (!qp) {
-			ehca_err(cq->device, "cq_num=%x qp_num=%x "
-				 "could not find qp -> ignore cqe",
-				 my_cq->cq_number, cqe->local_qp_number);
-			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
-				 my_cq->cq_number, cqe->local_qp_number);
-			/* ignore this purged cqe */
-			goto repoll;
-		}
-		spin_lock_irqsave(&qp->spinlock_s, flags);
-		purgeflag = qp->sqerr_purgeflag;
-		spin_unlock_irqrestore(&qp->spinlock_s, flags);
-
-		if (purgeflag) {
-			ehca_dbg(cq->device,
-				 "Got CQE with purged bit qp_num=%x src_qp=%x",
-				 cqe->local_qp_number, cqe->remote_qp_number);
-			if (ehca_debug_level >= 2)
-				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
-					 cqe->local_qp_number,
-					 cqe->remote_qp_number);
-			/*
-			 * ignore this CQE to avoid a duplicate completion for
-			 * the bad WQE that caused the send queue error, and
-			 * turn off the purge flag
-			 */
-			qp->sqerr_purgeflag = 0;
-			goto repoll;
-		}
-	}
-
-	is_error = cqe->status & WC_STATUS_ERROR_BIT;
-
-	/* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
-	if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
-		ehca_dbg(cq->device,
-			 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
-			 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
-		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
-			 my_cq, my_cq->cq_number);
-		ehca_dbg(cq->device,
-			 "ehca_cq=%p cq_num=%x -------------------------",
-			 my_cq, my_cq->cq_number);
-	}
-
-	read_lock(&ehca_qp_idr_lock);
-	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
-	read_unlock(&ehca_qp_idr_lock);
-	if (!my_qp)
-		goto repoll;
-	wc->qp = &my_qp->ib_qp;
-
-	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-		/* We got a send completion. */
-		qmap = &my_qp->sq_map;
-	else
-		/* We got a receive completion. */
-		qmap = &my_qp->rq_map;
-
-	/* advance the tail pointer */
-	qmap->tail = qmap_tail_idx;
-
-	if (is_error) {
-		/*
-		 * set left_to_poll to 0 because in error state, we will not
-		 * get any additional CQEs
-		 */
-		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
-							my_qp->sq_map.entries);
-		my_qp->sq_map.left_to_poll = 0;
-		ehca_add_to_err_list(my_qp, 1);
-
-		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
-							my_qp->rq_map.entries);
-		my_qp->rq_map.left_to_poll = 0;
-		if (HAS_RQ(my_qp))
-			ehca_add_to_err_list(my_qp, 0);
-	}
-
-	qmap_entry = &qmap->map[qmap_tail_idx];
-	if (qmap_entry->reported) {
-		ehca_warn(cq->device, "Double cqe on qp_num=%#x",
-				my_qp->real_qp_num);
-		/* found a double cqe, discard it and read next one */
-		goto repoll;
-	}
-
-	wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
-	qmap_entry->reported = 1;
-
-	/* if left_to_poll is decremented to 0, add the QP to the error list */
-	if (qmap->left_to_poll > 0) {
-		qmap->left_to_poll--;
-		if ((my_qp->sq_map.left_to_poll == 0) &&
-				(my_qp->rq_map.left_to_poll == 0)) {
-			ehca_add_to_err_list(my_qp, 1);
-			if (HAS_RQ(my_qp))
-				ehca_add_to_err_list(my_qp, 0);
-		}
-	}
-
-	/* eval ib_wc_opcode */
-	wc->opcode = ib_wc_opcode[cqe->optype]-1;
-	if (unlikely(wc->opcode == -1)) {
-		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
-			 "ehca_cq=%p cq_num=%x",
-			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
-		/* dump cqe for other infos */
-		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
-			 my_cq, my_cq->cq_number);
-		/* also update the queue adder to throw away this entry */
-		goto repoll;
-	}
-
-	/* eval ib_wc_status */
-	if (unlikely(is_error)) {
-		/* complete with errors */
-		map_ib_wc_status(cqe->status, &wc->status);
-		wc->vendor_err = wc->status;
-	} else
-		wc->status = IB_WC_SUCCESS;
-
-	wc->byte_len = cqe->nr_bytes_transferred;
-	wc->pkey_index = cqe->pkey_index;
-	wc->slid = cqe->rlid;
-	wc->dlid_path_bits = cqe->dlid;
-	wc->src_qp = cqe->remote_qp_number;
-	/*
-	 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
-	 * SW defines those in bits 1 and 0, so we can just shift and mask.
-	 */
-	wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
-	wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
-	wc->sl = cqe->service_level;
-
-poll_cq_one_exit0:
-	if (cqe_count > 0)
-		hipz_update_feca(my_cq, cqe_count);
-
-	return ret;
-}
-
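-/*
- * Generate IB_WC_WR_FLUSH_ERR completions for WQEs that were posted but
- * never reported by hardware, starting at the queue map's next_wqe_idx.
- */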
-static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
-			       struct ib_wc *wc, int num_entries,
-			       struct ipz_queue *ipz_queue, int on_sq)
-{
-	int nr = 0;
-	struct ehca_wqe *wqe;
-	u64 offset;
-	struct ehca_queue_map *qmap;
-	struct ehca_qmap_entry *qmap_entry;
-
-	if (on_sq)
-		qmap = &my_qp->sq_map;
-	else
-		qmap = &my_qp->rq_map;
-
-	qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
-	while ((nr < num_entries) && (qmap_entry->reported == 0)) {
-		/* generate flush CQE */
-
-		memset(wc, 0, sizeof(*wc));
-
-		offset = qmap->next_wqe_idx * ipz_queue->qe_size;
-		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
-		if (!wqe) {
-			ehca_err(cq->device, "Invalid wqe offset=%#llx on "
-				 "qp_num=%#x", offset, my_qp->real_qp_num);
-			return nr;
-		}
-
-		wc->wr_id = replace_wr_id(wqe->work_request_id,
-					  qmap_entry->app_wr_id);
-
-		if (on_sq) {
-			switch (wqe->optype) {
-			case WQE_OPTYPE_SEND:
-				wc->opcode = IB_WC_SEND;
-				break;
-			case WQE_OPTYPE_RDMAWRITE:
-				wc->opcode = IB_WC_RDMA_WRITE;
-				break;
-			case WQE_OPTYPE_RDMAREAD:
-				wc->opcode = IB_WC_RDMA_READ;
-				break;
-			default:
-				ehca_err(cq->device, "Invalid optype=%x",
-						wqe->optype);
-				return nr;
-			}
-		} else
-			wc->opcode = IB_WC_RECV;
-
-		if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
-			wc->ex.imm_data = wqe->immediate_data;
-			wc->wc_flags |= IB_WC_WITH_IMM;
-		}
-
-		wc->status = IB_WC_WR_FLUSH_ERR;
-
-		wc->qp = &my_qp->ib_qp;
-
-		/* mark as reported and advance next_wqe pointer */
-		qmap_entry->reported = 1;
-		qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
-						qmap->entries);
-		qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
-		wc++; nr++;
-	}
-
-	return nr;
-
-}
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
-{
-	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	int nr;
-	struct ehca_qp *err_qp;
-	struct ib_wc *current_wc = wc;
-	int ret = 0;
-	unsigned long flags;
-	int entries_left = num_entries;
-
-	if (num_entries < 1) {
-		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
-			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
-		ret = -EINVAL;
-		goto poll_cq_exit0;
-	}
-
-	spin_lock_irqsave(&my_cq->spinlock, flags);
-
-	/* generate flush cqes for send queues */
-	list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
-		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
-				&err_qp->ipz_squeue, 1);
-		entries_left -= nr;
-		current_wc += nr;
-
-		if (entries_left == 0)
-			break;
-	}
-
-	/* generate flush cqes for receive queues */
-	list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
-		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
-				&err_qp->ipz_rqueue, 0);
-		entries_left -= nr;
-		current_wc += nr;
-
-		if (entries_left == 0)
-			break;
-	}
-
-	for (nr = 0; nr < entries_left; nr++) {
-		ret = ehca_poll_cq_one(cq, current_wc);
-		if (ret)
-			break;
-		current_wc++;
-	} /* eof for nr */
-	entries_left -= nr;
-
-	spin_unlock_irqrestore(&my_cq->spinlock, flags);
-	if (ret == -EAGAIN  || !ret)
-		ret = num_entries - entries_left;
-
-poll_cq_exit0:
-	return ret;
-}
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
-{
-	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	int ret = 0;
-
-	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
-	case IB_CQ_SOLICITED:
-		hipz_set_cqx_n0(my_cq, 1);
-		break;
-	case IB_CQ_NEXT_COMP:
-		hipz_set_cqx_n1(my_cq, 1);
-		break;
-	default:
-		return -EINVAL;
-	}
-
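-	/* if requested, report whether a completion is already pending */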
-	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
-		unsigned long spl_flags;
-		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
-		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
-		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
-	}
-
-	return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
deleted file mode 100644
index 376b031..0000000
--- a/drivers/staging/rdma/ehca/ehca_sqp.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  SQP functions
- *
- *  Authors: Khadija Souissi <souissi@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rdma/ib_mad.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define IB_MAD_STATUS_REDIRECT		cpu_to_be16(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION	cpu_to_be16(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD	cpu_to_be16(0x0008)
-
-#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
-
-/**
- * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When the special
- * queue pair is created successfully, the corresponding port becomes active.
- *
- * Defining Special Queue Pair 0 (SMI QP) is not supported yet.
- *
- * @qp_init_attr: Queue pair init attributes with port and queue pair type
- */
-
-u64 ehca_define_sqp(struct ehca_shca *shca,
-		    struct ehca_qp *ehca_qp,
-		    struct ib_qp_init_attr *qp_init_attr)
-{
-	u32 pma_qp_nr, bma_qp_nr;
-	u64 ret;
-	u8 port = qp_init_attr->port_num;
-	int counter;
-
-	shca->sport[port - 1].port_state = IB_PORT_DOWN;
-
-	switch (qp_init_attr->qp_type) {
-	case IB_QPT_SMI:
-		/* function not supported yet */
-		break;
-	case IB_QPT_GSI:
-		ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
-					 ehca_qp->ipz_qp_handle,
-					 ehca_qp->galpas.kernel,
-					 (u32) qp_init_attr->port_num,
-					 &pma_qp_nr, &bma_qp_nr);
-
-		if (ret != H_SUCCESS) {
-			ehca_err(&shca->ib_device,
-				 "Can't define AQP1 for port %x. h_ret=%lli",
-				 port, ret);
-			return ret;
-		}
-		shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
-		ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
-			 port, pma_qp_nr);
-		break;
-	default:
-		ehca_err(&shca->ib_device, "invalid qp_type=%x",
-			 qp_init_attr->qp_type);
-		return H_PARAMETER;
-	}
-
-	if (ehca_nr_ports < 0) /* autodetect mode */
-		return H_SUCCESS;
-
-	for (counter = 0;
-	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
-		     counter < ehca_port_act_time;
-	     counter++) {
-		ehca_dbg(&shca->ib_device, "... wait until port %x is active",
-			 port);
-		msleep_interruptible(1000);
-	}
-
-	if (counter == ehca_port_act_time) {
-		ehca_err(&shca->ib_device, "Port %x is not active.", port);
-		return H_HARDWARE;
-	}
-
-	return H_SUCCESS;
-}
-
-struct ib_perf {
-	struct ib_mad_hdr mad_hdr;
-	u8 reserved[40];
-	u8 data[192];
-} __attribute__ ((packed));
-
-/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
-struct tcslfl {
-	u32 tc:8;
-	u32 sl:4;
-	u32 fl:20;
-} __attribute__ ((packed));
-
-/* IP Version/TC/FL packed into 32 bits, as in GRH */
-struct vertcfl {
-	u32 ver:4;
-	u32 tc:8;
-	u32 fl:20;
-} __attribute__ ((packed));
-
-static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
-			     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-			     const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
-	const struct ib_perf *in_perf = (const struct ib_perf *)in_mad;
-	struct ib_perf *out_perf = (struct ib_perf *)out_mad;
-	struct ib_class_port_info *poi =
-		(struct ib_class_port_info *)out_perf->data;
-	struct tcslfl *tcslfl =
-		(struct tcslfl *)&poi->redirect_tcslfl;
-	struct ehca_shca *shca =
-		container_of(ibdev, struct ehca_shca, ib_device);
-	struct ehca_sport *sport = &shca->sport[port_num - 1];
-
-	ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
-
-	*out_mad = *in_mad;
-
-	if (in_perf->mad_hdr.class_version != 1) {
-		ehca_warn(ibdev, "Unsupported class_version=%x",
-			  in_perf->mad_hdr.class_version);
-		out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
-		goto perf_reply;
-	}
-
-	switch (in_perf->mad_hdr.method) {
-	case IB_MGMT_METHOD_GET:
-	case IB_MGMT_METHOD_SET:
-		/* set class port info for redirection */
-		out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
-		out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
-		memset(poi, 0, sizeof(*poi));
-		poi->base_version = 1;
-		poi->class_version = 1;
-		poi->resp_time_value = 18;
-
-		/* copy local routing information from WC where applicable */
-		tcslfl->sl         = in_wc->sl;
-		poi->redirect_lid  =
-			sport->saved_attr.lid | in_wc->dlid_path_bits;
-		poi->redirect_qp   = sport->pma_qp_nr;
-		poi->redirect_qkey = IB_QP1_QKEY;
-
-		ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
-				&poi->redirect_pkey);
-
-		/* if request was globally routed, copy route info */
-		if (in_grh) {
-			const struct vertcfl *vertcfl =
-				(const struct vertcfl *)&in_grh->version_tclass_flow;
-			memcpy(poi->redirect_gid, in_grh->dgid.raw,
-			       sizeof(poi->redirect_gid));
-			tcslfl->tc        = vertcfl->tc;
-			tcslfl->fl        = vertcfl->fl;
-		} else
-			/* else only fill in default GID */
-			ehca_query_gid(ibdev, port_num, 0,
-				       (union ib_gid *)&poi->redirect_gid);
-
-		ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
-			 sport->saved_attr.lid, sport->pma_qp_nr);
-		break;
-
-	case IB_MGMT_METHOD_GET_RESP:
-		return IB_MAD_RESULT_FAILURE;
-
-	default:
-		out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
-		break;
-	}
-
-perf_reply:
-	out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
-
-	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-		     const struct ib_mad_hdr *in, size_t in_mad_size,
-		     struct ib_mad_hdr *out, size_t *out_mad_size,
-		     u16 *out_mad_pkey_index)
-{
-	int ret;
-	const struct ib_mad *in_mad = (const struct ib_mad *)in;
-	struct ib_mad *out_mad = (struct ib_mad *)out;
-
-	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-			 *out_mad_size != sizeof(*out_mad)))
-		return IB_MAD_RESULT_FAILURE;
-
-	if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
-		return IB_MAD_RESULT_FAILURE;
-
-	/* accept only pma request */
-	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
-		return IB_MAD_RESULT_SUCCESS;
-
-	ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
-	ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
-				in_mad, out_mad);
-
-	return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
deleted file mode 100644
index d280b12..0000000
--- a/drivers/staging/rdma/ehca/ehca_tools.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  auxiliary functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Khadija Souissi <souissik@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef EHCA_TOOLS_H
-#define EHCA_TOOLS_H
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/vmalloc.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/device.h>
-
-#include <linux/atomic.h>
-#include <asm/ibmebus.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/hvcall.h>
-
-extern int ehca_debug_level;
-
-#define ehca_dbg(ib_dev, format, arg...) \
-	do { \
-		if (unlikely(ehca_debug_level)) \
-			dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
-				   "PU%04x EHCA_DBG:%s " format "\n", \
-				   raw_smp_processor_id(), __func__, \
-				   ## arg); \
-	} while (0)
-
-#define ehca_info(ib_dev, format, arg...) \
-	dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
-		 raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_warn(ib_dev, format, arg...) \
-	dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
-		 raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_err(ib_dev, format, arg...) \
-	dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
-		raw_smp_processor_id(), __func__, ## arg)
-
-/* use this one only if no ib_dev is available */
-#define ehca_gen_dbg(format, arg...) \
-	do { \
-		if (unlikely(ehca_debug_level)) \
-			printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
-			       raw_smp_processor_id(), __func__, ## arg); \
-	} while (0)
-
-#define ehca_gen_warn(format, arg...) \
-	printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
-	       raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_gen_err(format, arg...) \
-	printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
-	       raw_smp_processor_id(), __func__, ## arg)
-
-/**
- * ehca_dmp - printk a memory block, whose length is n*8 bytes.
- * Each line has the following layout:
- * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
- */
-#define ehca_dmp(adr, len, format, args...) \
-	do { \
-		unsigned int x; \
-		unsigned int l = (unsigned int)(len); \
-		unsigned char *deb = (unsigned char *)(adr); \
-		for (x = 0; x < l; x += 16) { \
-			printk(KERN_INFO "EHCA_DMP:%s " format \
-			       " adr=%p ofs=%04x %016llx %016llx\n", \
-			       __func__, ##args, deb, x, \
-			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
-			deb += 16; \
-		} \
-	} while (0)
-
-/* define a bitmask, little endian version */
-#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
-
-/* define a bitmask, the ibm way... */
-#define EHCA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))
-
-/* internal function, don't use */
-#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
-
-/* internal function, don't use */
-#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
-
-/**
- * EHCA_BMASK_SET - return value shifted and masked by mask
- * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
- * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
- * in variable
- */
-#define EHCA_BMASK_SET(mask, value) \
-	((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
-
-/**
- * EHCA_BMASK_GET - extract a parameter from value by mask
- */
-#define EHCA_BMASK_GET(mask, value) \
-	(EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
-
-/* Converts ehca to ib return code */
-int ehca2ib_return_code(u64 ehca_rc);
-
-#endif /* EHCA_TOOLS_H */
diff --git a/drivers/staging/rdma/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
deleted file mode 100644
index 1a1d5d9..0000000
--- a/drivers/staging/rdma/ehca/ehca_uverbs.c
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  userspace support verbs
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
-					struct ib_udata *udata)
-{
-	struct ehca_ucontext *my_context;
-
-	my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
-	if (!my_context) {
-		ehca_err(device, "Out of memory device=%p", device);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return &my_context->ib_ucontext;
-}
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context)
-{
-	kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
-	return 0;
-}
-
-static void ehca_mm_open(struct vm_area_struct *vma)
-{
-	u32 *count = (u32 *)vma->vm_private_data;
-	if (!count) {
-		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
-			     vma->vm_start, vma->vm_end);
-		return;
-	}
-	(*count)++;
-	if (!(*count))
-		ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
-			     vma->vm_start, vma->vm_end);
-	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
-		     vma->vm_start, vma->vm_end, *count);
-}
-
-static void ehca_mm_close(struct vm_area_struct *vma)
-{
-	u32 *count = (u32 *)vma->vm_private_data;
-	if (!count) {
-		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
-			     vma->vm_start, vma->vm_end);
-		return;
-	}
-	(*count)--;
-	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
-		     vma->vm_start, vma->vm_end, *count);
-}
-
-static const struct vm_operations_struct vm_ops = {
-	.open =	ehca_mm_open,
-	.close = ehca_mm_close,
-};
-
-static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
-			u32 *mm_count)
-{
-	int ret;
-	u64 vsize, physical;
-
-	vsize = vma->vm_end - vma->vm_start;
-	if (vsize < EHCA_PAGESIZE) {
-		ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
-		return -EINVAL;
-	}
-
-	physical = galpas->user.fw_handle;
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
-	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
-	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
-			   vma->vm_page_prot);
-	if (unlikely(ret)) {
-		ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
-		return -ENOMEM;
-	}
-
-	vma->vm_private_data = mm_count;
-	(*mm_count)++;
-	vma->vm_ops = &vm_ops;
-
-	return 0;
-}
-
-static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
-			   u32 *mm_count)
-{
-	int ret;
-	u64 start, ofs;
-	struct page *page;
-
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	start = vma->vm_start;
-	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
-		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
-		page = virt_to_page(virt_addr);
-		ret = vm_insert_page(vma, start, page);
-		if (unlikely(ret)) {
-			ehca_gen_err("vm_insert_page() failed rc=%i", ret);
-			return ret;
-		}
-		start += PAGE_SIZE;
-	}
-	vma->vm_private_data = mm_count;
-	(*mm_count)++;
-	vma->vm_ops = &vm_ops;
-
-	return 0;
-}
-
-static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
-			u32 rsrc_type)
-{
-	int ret;
-
-	switch (rsrc_type) {
-	case 0: /* galpa fw handle */
-		ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
-		ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
-		if (unlikely(ret)) {
-			ehca_err(cq->ib_cq.device,
-				 "ehca_mmap_fw() failed rc=%i cq_num=%x",
-				 ret, cq->cq_number);
-			return ret;
-		}
-		break;
-
-	case 1: /* cq queue_addr */
-		ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
-		ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
-		if (unlikely(ret)) {
-			ehca_err(cq->ib_cq.device,
-				 "ehca_mmap_queue() failed rc=%i cq_num=%x",
-				 ret, cq->cq_number);
-			return ret;
-		}
-		break;
-
-	default:
-		ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
-			 rsrc_type, cq->cq_number);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
-			u32 rsrc_type)
-{
-	int ret;
-
-	switch (rsrc_type) {
-	case 0: /* galpa fw handle */
-		ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
-		ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
-		if (unlikely(ret)) {
-			ehca_err(qp->ib_qp.device,
-				 "remap_pfn_range() failed ret=%i qp_num=%x",
-				 ret, qp->ib_qp.qp_num);
-			return -ENOMEM;
-		}
-		break;
-
-	case 1: /* qp rqueue_addr */
-		ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
-		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
-				      &qp->mm_count_rqueue);
-		if (unlikely(ret)) {
-			ehca_err(qp->ib_qp.device,
-				 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
-				 ret, qp->ib_qp.qp_num);
-			return ret;
-		}
-		break;
-
-	case 2: /* qp squeue_addr */
-		ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
-		ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
-				      &qp->mm_count_squeue);
-		if (unlikely(ret)) {
-			ehca_err(qp->ib_qp.device,
-				 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
-				 ret, qp->ib_qp.qp_num);
-			return ret;
-		}
-		break;
-
-	default:
-		ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
-			 rsrc_type, qp->ib_qp.qp_num);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
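-	/*
-	 * The mmap offset encodes the target object: bits 0-24 hold the idr
-	 * handle, bits 25-26 the resource type and bit 27 selects CQ vs. QP.
-	 */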
-	u64 fileoffset = vma->vm_pgoff;
-	u32 idr_handle = fileoffset & 0x1FFFFFF;
-	u32 q_type = (fileoffset >> 27) & 0x1;	  /* CQ, QP,...        */
-	u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
-	u32 ret;
-	struct ehca_cq *cq;
-	struct ehca_qp *qp;
-	struct ib_uobject *uobject;
-
-	switch (q_type) {
-	case  0: /* CQ */
-		read_lock(&ehca_cq_idr_lock);
-		cq = idr_find(&ehca_cq_idr, idr_handle);
-		read_unlock(&ehca_cq_idr_lock);
-
-		/* make sure this mmap really belongs to the authorized user */
-		if (!cq)
-			return -EINVAL;
-
-		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
-			return -EINVAL;
-
-		ret = ehca_mmap_cq(vma, cq, rsrc_type);
-		if (unlikely(ret)) {
-			ehca_err(cq->ib_cq.device,
-				 "ehca_mmap_cq() failed rc=%i cq_num=%x",
-				 ret, cq->cq_number);
-			return ret;
-		}
-		break;
-
-	case 1: /* QP */
-		read_lock(&ehca_qp_idr_lock);
-		qp = idr_find(&ehca_qp_idr, idr_handle);
-		read_unlock(&ehca_qp_idr_lock);
-
-		/* make sure this mmap really belongs to the authorized user */
-		if (!qp)
-			return -EINVAL;
-
-		uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
-		if (!uobject || uobject->context != context)
-			return -EINVAL;
-
-		ret = ehca_mmap_qp(vma, qp, rsrc_type);
-		if (unlikely(ret)) {
-			ehca_err(qp->ib_qp.device,
-				 "ehca_mmap_qp() failed rc=%i qp_num=%x",
-				 ret, qp->ib_qp.qp_num);
-			return ret;
-		}
-		break;
-
-	default:
-		ehca_gen_err("bad queue type %x", q_type);
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/staging/rdma/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
deleted file mode 100644
index 89517ff..0000000
--- a/drivers/staging/rdma/ehca/hcp_if.c
+++ /dev/null
@@ -1,949 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware Infiniband Interface code for POWER
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm/hvcall.h>
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hcp_phyp.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
-#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
-#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
-#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
-#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
-#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
-#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)
-
-#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
-#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)
-
-#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
-#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)
-
-#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)
-
-#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)
-
-#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
-#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
-#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)
-
-#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
-#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
-#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
-
-static DEFINE_SPINLOCK(hcall_lock);
-
-static long ehca_plpar_hcall_norets(unsigned long opcode,
-				    unsigned long arg1,
-				    unsigned long arg2,
-				    unsigned long arg3,
-				    unsigned long arg4,
-				    unsigned long arg5,
-				    unsigned long arg6,
-				    unsigned long arg7)
-{
-	long ret;
-	int i, sleep_msecs;
-	unsigned long flags = 0;
-
-	if (unlikely(ehca_debug_level >= 2))
-		ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
-			     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
-
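-	/* retry up to 5 times while the hypervisor reports long-busy */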
-	for (i = 0; i < 5; i++) {
-		/* serialize hCalls to work around firmware issue */
-		if (ehca_lock_hcalls)
-			spin_lock_irqsave(&hcall_lock, flags);
-
-		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
-					 arg5, arg6, arg7);
-
-		if (ehca_lock_hcalls)
-			spin_unlock_irqrestore(&hcall_lock, flags);
-
-		if (H_IS_LONG_BUSY(ret)) {
-			sleep_msecs = get_longbusy_msecs(ret);
-			msleep_interruptible(sleep_msecs);
-			continue;
-		}
-
-		if (ret < H_SUCCESS)
-			ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
-				     opcode, ret, arg1, arg2, arg3,
-				     arg4, arg5, arg6, arg7);
-		else
-			if (unlikely(ehca_debug_level >= 2))
-				ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
-
-		return ret;
-	}
-
-	return H_BUSY;
-}
-
-static long ehca_plpar_hcall9(unsigned long opcode,
-			      unsigned long *outs, /* array of 9 outputs */
-			      unsigned long arg1,
-			      unsigned long arg2,
-			      unsigned long arg3,
-			      unsigned long arg4,
-			      unsigned long arg5,
-			      unsigned long arg6,
-			      unsigned long arg7,
-			      unsigned long arg8,
-			      unsigned long arg9)
-{
-	long ret;
-	int i, sleep_msecs;
-	unsigned long flags = 0;
-
-	if (unlikely(ehca_debug_level >= 2))
-		ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
-			     arg1, arg2, arg3, arg4, arg5,
-			     arg6, arg7, arg8, arg9);
-
-	for (i = 0; i < 5; i++) {
-		/* serialize hCalls to work around firmware issue */
-		if (ehca_lock_hcalls)
-			spin_lock_irqsave(&hcall_lock, flags);
-
-		ret = plpar_hcall9(opcode, outs,
-				   arg1, arg2, arg3, arg4, arg5,
-				   arg6, arg7, arg8, arg9);
-
-		if (ehca_lock_hcalls)
-			spin_unlock_irqrestore(&hcall_lock, flags);
-
-		if (H_IS_LONG_BUSY(ret)) {
-			sleep_msecs = get_longbusy_msecs(ret);
-			msleep_interruptible(sleep_msecs);
-			continue;
-		}
-
-		if (ret < H_SUCCESS) {
-			ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
-				     opcode, arg1, arg2, arg3, arg4, arg5,
-				     arg6, arg7, arg8, arg9);
-			ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
-				     ret, outs[0], outs[1], outs[2], outs[3],
-				     outs[4], outs[5], outs[6], outs[7],
-				     outs[8]);
-		} else if (unlikely(ehca_debug_level >= 2))
-			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
-				     ret, outs[0], outs[1], outs[2], outs[3],
-				     outs[4], outs[5], outs[6], outs[7],
-				     outs[8]);
-		return ret;
-	}
-
-	return H_BUSY;
-}
-
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_pfeq *pfeq,
-			     const u32 neq_control,
-			     const u32 number_of_entries,
-			     struct ipz_eq_handle *eq_handle,
-			     u32 *act_nr_of_entries,
-			     u32 *act_pages,
-			     u32 *eq_ist)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-	u64 allocate_controls;
-
-	/* resource type */
-	allocate_controls = 3ULL;
-
-	/* ISN is associated */
-	if (neq_control != 1)
-		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
-	else /* notification event queue */
-		allocate_controls = (1ULL << 63) | allocate_controls;
-
-	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-				adapter_handle.handle,  /* r4 */
-				allocate_controls,      /* r5 */
-				number_of_entries,      /* r6 */
-				0, 0, 0, 0, 0, 0);
-	eq_handle->handle = outs[0];
-	*act_nr_of_entries = (u32)outs[3];
-	*act_pages = (u32)outs[4];
-	*eq_ist = (u32)outs[5];
-
-	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-	return ret;
-}
-
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
-		       struct ipz_eq_handle eq_handle,
-		       const u64 event_mask)
-{
-	return ehca_plpar_hcall_norets(H_RESET_EVENTS,
-				       adapter_handle.handle, /* r4 */
-				       eq_handle.handle,      /* r5 */
-				       event_mask,	      /* r6 */
-				       0, 0, 0, 0);
-}
-
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_cq *cq,
-			     struct ehca_alloc_cq_parms *param)
-{
-	int rc;
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-				adapter_handle.handle,   /* r4  */
-				2,	                 /* r5  */
-				param->eq_handle.handle, /* r6  */
-				cq->token,	         /* r7  */
-				param->nr_cqe,           /* r8  */
-				0, 0, 0, 0);
-	cq->ipz_cq_handle.handle = outs[0];
-	param->act_nr_of_entries = (u32)outs[3];
-	param->act_pages = (u32)outs[4];
-
-	if (ret == H_SUCCESS) {
-		rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
-		if (rc) {
-			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
-				     rc, outs[5]);
-
-			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-						adapter_handle.handle,     /* r4 */
-						cq->ipz_cq_handle.handle,  /* r5 */
-						0, 0, 0, 0, 0);
-			ret = H_NO_MEM;
-		}
-	}
-
-	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-	return ret;
-}
-
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_alloc_qp_parms *parms, int is_user)
-{
-	int rc;
-	u64 ret;
-	u64 allocate_controls, max_r10_reg, r11, r12;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	allocate_controls =
-		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
-				 parms->squeue.page_size)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
-				 parms->rqueue.page_size)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
-				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
-		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
-				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
-		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
-				 parms->ud_av_l_key_ctl)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
-
-	max_r10_reg =
-		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
-			       parms->squeue.max_wr + 1)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
-				 parms->rqueue.max_wr + 1)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
-				 parms->squeue.max_sge)
-		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
-				 parms->rqueue.max_sge);
-
-	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
-
-	if (parms->ext_type == EQPT_SRQ)
-		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
-	else
-		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
-
-	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-				adapter_handle.handle,	           /* r4  */
-				allocate_controls,	           /* r5  */
-				parms->send_cq_handle.handle,
-				parms->recv_cq_handle.handle,
-				parms->eq_handle.handle,
-				((u64)parms->token << 32) | parms->pd.value,
-				max_r10_reg, r11, r12);
-
-	parms->qp_handle.handle = outs[0];
-	parms->real_qp_num = (u32)outs[1];
-	parms->squeue.act_nr_wqes =
-		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
-	parms->rqueue.act_nr_wqes =
-		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
-	parms->squeue.act_nr_sges =
-		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
-	parms->rqueue.act_nr_sges =
-		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
-	parms->squeue.queue_size =
-		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
-	parms->rqueue.queue_size =
-		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
-
-	if (ret == H_SUCCESS) {
-		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
-		if (rc) {
-			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
-				     rc, outs[6]);
-
-			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-						adapter_handle.handle,     /* r4 */
-						parms->qp_handle.handle,  /* r5 */
-						0, 0, 0, 0, 0);
-			ret = H_NO_MEM;
-		}
-	}
-
-	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-	return ret;
-}
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
-		      const u8 port_id,
-		      struct hipz_query_port *query_port_response_block)
-{
-	u64 ret;
-	u64 r_cb = __pa(query_port_response_block);
-
-	if (r_cb & (EHCA_PAGESIZE-1)) {
-		ehca_gen_err("response block not page aligned");
-		return H_PARAMETER;
-	}
-
-	ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
-				      adapter_handle.handle, /* r4 */
-				      port_id,	             /* r5 */
-				      r_cb,	             /* r6 */
-				      0, 0, 0, 0);
-
-	if (ehca_debug_level >= 2)
-		ehca_dmp(query_port_response_block, 64, "response_block");
-
-	return ret;
-}
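
H_QUERY_PORT (and H_QUERY_HCA below) hand the firmware the physical address of a response block, which is why these wrappers reject buffers that are not aligned to EHCA_PAGESIZE. A minimal caller-side sketch, assuming the block is taken from get_zeroed_page() so that __pa() of it is page aligned (the struct layout itself comes from hipz_hw.h; demo_query_port() is purely illustrative):

static int demo_query_port(struct ipz_adapter_handle ah, u8 port)
{
	struct hipz_query_port *rblock;
	u64 h_ret;
	int ret = 0;

	/* a whole zeroed kernel page is always EHCA_PAGESIZE aligned */
	rblock = (struct hipz_query_port *)get_zeroed_page(GFP_KERNEL);
	if (!rblock)
		return -ENOMEM;

	h_ret = hipz_h_query_port(ah, port, rblock);
	if (h_ret != H_SUCCESS)
		ret = -EINVAL;

	free_page((unsigned long)rblock);
	return ret;
}
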
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
-		       const u8 port_id, const u32 port_cap,
-		       const u8 init_type, const int modify_mask)
-{
-	u64 port_attributes = port_cap;
-
-	if (modify_mask & IB_PORT_SHUTDOWN)
-		port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
-	if (modify_mask & IB_PORT_INIT_TYPE)
-		port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
-	if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
-		port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
-
-	return ehca_plpar_hcall_norets(H_MODIFY_PORT,
-				       adapter_handle.handle, /* r4 */
-				       port_id,               /* r5 */
-				       port_attributes,       /* r6 */
-				       0, 0, 0, 0);
-}
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
-		     struct hipz_query_hca *query_hca_rblock)
-{
-	u64 r_cb = __pa(query_hca_rblock);
-
-	if (r_cb & (EHCA_PAGESIZE-1)) {
-		ehca_gen_err("response_block=%p not page aligned",
-			     query_hca_rblock);
-		return H_PARAMETER;
-	}
-
-	return ehca_plpar_hcall_norets(H_QUERY_HCA,
-				       adapter_handle.handle, /* r4 */
-				       r_cb,                  /* r5 */
-				       0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
-			  const u8 pagesize,
-			  const u8 queue_type,
-			  const u64 resource_handle,
-			  const u64 logical_address_of_page,
-			  u64 count)
-{
-	return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
-				       adapter_handle.handle,      /* r4  */
-				       (u64)queue_type | ((u64)pagesize) << 8,
-				       /* r5  */
-				       resource_handle,	           /* r6  */
-				       logical_address_of_page,    /* r7  */
-				       count,	                   /* r8  */
-				       0, 0);
-}
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
-			     const struct ipz_eq_handle eq_handle,
-			     struct ehca_pfeq *pfeq,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count)
-{
-	if (count != 1) {
-		ehca_gen_err("Page counter=%llx", count);
-		return H_PARAMETER;
-	}
-	return hipz_h_register_rpage(adapter_handle,
-				     pagesize,
-				     queue_type,
-				     eq_handle.handle,
-				     logical_address_of_page, count);
-}
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
-			   u32 ist)
-{
-	u64 ret;
-	ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
-				      adapter_handle.handle, /* r4 */
-				      ist,                   /* r5 */
-				      0, 0, 0, 0, 0);
-
-	if (ret != H_SUCCESS && ret != H_BUSY)
-		ehca_gen_err("Could not query interrupt state.");
-
-	return ret;
-}
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
-			     const struct ipz_cq_handle cq_handle,
-			     struct ehca_pfcq *pfcq,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count,
-			     const struct h_galpa gal)
-{
-	if (count != 1) {
-		ehca_gen_err("Page counter=%llx", count);
-		return H_PARAMETER;
-	}
-
-	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
-				     cq_handle.handle, logical_address_of_page,
-				     count);
-}
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
-			     const struct ipz_qp_handle qp_handle,
-			     struct ehca_pfqp *pfqp,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count,
-			     const struct h_galpa galpa)
-{
-	if (count > 1) {
-		ehca_gen_err("Page counter=%llx", count);
-		return H_PARAMETER;
-	}
-
-	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
-				     qp_handle.handle, logical_address_of_page,
-				     count);
-}
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
-			       const struct ipz_qp_handle qp_handle,
-			       struct ehca_pfqp *pfqp,
-			       void **log_addr_next_sq_wqe2processed,
-			       void **log_addr_next_rq_wqe2processed,
-			       int dis_and_get_function_code)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
-				adapter_handle.handle,     /* r4 */
-				dis_and_get_function_code, /* r5 */
-				qp_handle.handle,	   /* r6 */
-				0, 0, 0, 0, 0, 0);
-	if (log_addr_next_sq_wqe2processed)
-		*log_addr_next_sq_wqe2processed = (void *)outs[0];
-	if (log_addr_next_rq_wqe2processed)
-		*log_addr_next_rq_wqe2processed = (void *)outs[1];
-
-	return ret;
-}
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
-		     const struct ipz_qp_handle qp_handle,
-		     struct ehca_pfqp *pfqp,
-		     const u64 update_mask,
-		     struct hcp_modify_qp_control_block *mqpcb,
-		     struct h_galpa gal)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
-				adapter_handle.handle, /* r4 */
-				qp_handle.handle,      /* r5 */
-				update_mask,	       /* r6 */
-				__pa(mqpcb),	       /* r7 */
-				0, 0, 0, 0, 0);
-
-	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Insufficient resources ret=%lli", ret);
-
-	return ret;
-}
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
-		    const struct ipz_qp_handle qp_handle,
-		    struct ehca_pfqp *pfqp,
-		    struct hcp_modify_qp_control_block *qqpcb,
-		    struct h_galpa gal)
-{
-	return ehca_plpar_hcall_norets(H_QUERY_QP,
-				       adapter_handle.handle, /* r4 */
-				       qp_handle.handle,      /* r5 */
-				       __pa(qqpcb),	      /* r6 */
-				       0, 0, 0, 0);
-}
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
-		      struct ehca_qp *qp)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = hcp_galpas_dtor(&qp->galpas);
-	if (ret) {
-		ehca_gen_err("Could not destruct qp->galpas");
-		return H_RESOURCE;
-	}
-	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
-				adapter_handle.handle,     /* r4 */
-				/* function code */
-				1,	                   /* r5 */
-				qp->ipz_qp_handle.handle,  /* r6 */
-				0, 0, 0, 0, 0, 0);
-	if (ret == H_HARDWARE)
-		ehca_gen_err("HCA not operational. ret=%lli", ret);
-
-	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-				      adapter_handle.handle,     /* r4 */
-				      qp->ipz_qp_handle.handle,  /* r5 */
-				      0, 0, 0, 0, 0);
-
-	if (ret == H_RESOURCE)
-		ehca_gen_err("Resource still in use. ret=%lli", ret);
-
-	return ret;
-}
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u32 port)
-{
-	return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
-				       adapter_handle.handle, /* r4 */
-				       qp_handle.handle,      /* r5 */
-				       port,                  /* r6 */
-				       0, 0, 0, 0);
-}
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u32 port, u32 * pma_qp_nr,
-		       u32 * bma_qp_nr)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
-				adapter_handle.handle, /* r4 */
-				qp_handle.handle,      /* r5 */
-				port,	               /* r6 */
-				0, 0, 0, 0, 0, 0);
-	*pma_qp_nr = (u32)outs[0];
-	*bma_qp_nr = (u32)outs[1];
-
-	if (ret == H_ALIAS_EXIST)
-		ehca_gen_err("AQP1 already exists. ret=%lli", ret);
-
-	return ret;
-}
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u16 mcg_dlid,
-		       u64 subnet_prefix, u64 interface_id)
-{
-	u64 ret;
-
-	ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
-				      adapter_handle.handle,  /* r4 */
-				      qp_handle.handle,       /* r5 */
-				      mcg_dlid,               /* r6 */
-				      interface_id,           /* r7 */
-				      subnet_prefix,          /* r8 */
-				      0, 0);
-
-	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-	return ret;
-}
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u16 mcg_dlid,
-		       u64 subnet_prefix, u64 interface_id)
-{
-	return ehca_plpar_hcall_norets(H_DETACH_MCQP,
-				       adapter_handle.handle, /* r4 */
-				       qp_handle.handle,      /* r5 */
-				       mcg_dlid,              /* r6 */
-				       interface_id,          /* r7 */
-				       subnet_prefix,         /* r8 */
-				       0, 0);
-}
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
-		      struct ehca_cq *cq,
-		      u8 force_flag)
-{
-	u64 ret;
-
-	ret = hcp_galpas_dtor(&cq->galpas);
-	if (ret) {
-		ehca_gen_err("Could not destruct cq->galpas");
-		return H_RESOURCE;
-	}
-
-	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-				      adapter_handle.handle,     /* r4 */
-				      cq->ipz_cq_handle.handle,  /* r5 */
-				      force_flag != 0 ? 1L : 0L, /* r6 */
-				      0, 0, 0, 0);
-
-	if (ret == H_RESOURCE)
-		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
-
-	return ret;
-}
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
-		      struct ehca_eq *eq)
-{
-	u64 ret;
-
-	ret = hcp_galpas_dtor(&eq->galpas);
-	if (ret) {
-		ehca_gen_err("Could not destruct eq->galpas");
-		return H_RESOURCE;
-	}
-
-	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-				      adapter_handle.handle,     /* r4 */
-				      eq->ipz_eq_handle.handle,  /* r5 */
-				      0, 0, 0, 0, 0);
-
-	if (ret == H_RESOURCE)
-		ehca_gen_err("Resource in use. ret=%lli ", ret);
-
-	return ret;
-}
-
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
-			     const struct ehca_mr *mr,
-			     const u64 vaddr,
-			     const u64 length,
-			     const u32 access_ctrl,
-			     const struct ipz_pd pd,
-			     struct ehca_mr_hipzout_parms *outparms)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-				adapter_handle.handle,            /* r4 */
-				5,                                /* r5 */
-				vaddr,                            /* r6 */
-				length,                           /* r7 */
-				(((u64)access_ctrl) << 32ULL),    /* r8 */
-				pd.value,                         /* r9 */
-				0, 0, 0);
-	outparms->handle.handle = outs[0];
-	outparms->lkey = (u32)outs[2];
-	outparms->rkey = (u32)outs[3];
-
-	return ret;
-}
-
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
-			     const struct ehca_mr *mr,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count)
-{
-	u64 ret;
-
-	if (unlikely(ehca_debug_level >= 3)) {
-		if (count > 1) {
-			u64 *kpage;
-			int i;
-			kpage = __va(logical_address_of_page);
-			for (i = 0; i < count; i++)
-				ehca_gen_dbg("kpage[%d]=%p",
-					     i, (void *)kpage[i]);
-		} else
-			ehca_gen_dbg("kpage=%p",
-				     (void *)logical_address_of_page);
-	}
-
-	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
-		ehca_gen_err("logical_address_of_page not on a 4k boundary "
-			     "adapter_handle=%llx mr=%p mr_handle=%llx "
-			     "pagesize=%x queue_type=%x "
-			     "logical_address_of_page=%llx count=%llx",
-			     adapter_handle.handle, mr,
-			     mr->ipz_mr_handle.handle, pagesize, queue_type,
-			     logical_address_of_page, count);
-		ret = H_PARAMETER;
-	} else
-		ret = hipz_h_register_rpage(adapter_handle, pagesize,
-					    queue_type,
-					    mr->ipz_mr_handle.handle,
-					    logical_address_of_page, count);
-	return ret;
-}
-
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
-		    const struct ehca_mr *mr,
-		    struct ehca_mr_hipzout_parms *outparms)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
-				adapter_handle.handle,     /* r4 */
-				mr->ipz_mr_handle.handle,  /* r5 */
-				0, 0, 0, 0, 0, 0, 0);
-	outparms->len = outs[0];
-	outparms->vaddr = outs[1];
-	outparms->acl  = outs[4] >> 32;
-	outparms->lkey = (u32)(outs[5] >> 32);
-	outparms->rkey = (u32)(outs[5] & (0xffffffff));
-
-	return ret;
-}
-
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
-			    const struct ehca_mr *mr)
-{
-	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-				       adapter_handle.handle,    /* r4 */
-				       mr->ipz_mr_handle.handle, /* r5 */
-				       0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
-			  const struct ehca_mr *mr,
-			  const u64 vaddr_in,
-			  const u64 length,
-			  const u32 access_ctrl,
-			  const struct ipz_pd pd,
-			  const u64 mr_addr_cb,
-			  struct ehca_mr_hipzout_parms *outparms)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
-				adapter_handle.handle,    /* r4 */
-				mr->ipz_mr_handle.handle, /* r5 */
-				vaddr_in,	          /* r6 */
-				length,                   /* r7 */
-				/* r8 */
-				((((u64)access_ctrl) << 32ULL) | pd.value),
-				mr_addr_cb,               /* r9 */
-				0, 0, 0);
-	outparms->vaddr = outs[1];
-	outparms->lkey = (u32)outs[2];
-	outparms->rkey = (u32)outs[3];
-
-	return ret;
-}
-
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
-			const struct ehca_mr *mr,
-			const struct ehca_mr *orig_mr,
-			const u64 vaddr_in,
-			const u32 access_ctrl,
-			const struct ipz_pd pd,
-			struct ehca_mr_hipzout_parms *outparms)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
-				adapter_handle.handle,            /* r4 */
-				orig_mr->ipz_mr_handle.handle,    /* r5 */
-				vaddr_in,                         /* r6 */
-				(((u64)access_ctrl) << 32ULL),    /* r7 */
-				pd.value,                         /* r8 */
-				0, 0, 0, 0);
-	outparms->handle.handle = outs[0];
-	outparms->lkey = (u32)outs[2];
-	outparms->rkey = (u32)outs[3];
-
-	return ret;
-}
-
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
-			     const struct ehca_mw *mw,
-			     const struct ipz_pd pd,
-			     struct ehca_mw_hipzout_parms *outparms)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-				adapter_handle.handle,      /* r4 */
-				6,                          /* r5 */
-				pd.value,                   /* r6 */
-				0, 0, 0, 0, 0, 0);
-	outparms->handle.handle = outs[0];
-	outparms->rkey = (u32)outs[3];
-
-	return ret;
-}
-
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
-		    const struct ehca_mw *mw,
-		    struct ehca_mw_hipzout_parms *outparms)
-{
-	u64 ret;
-	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-	ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
-				adapter_handle.handle,    /* r4 */
-				mw->ipz_mw_handle.handle, /* r5 */
-				0, 0, 0, 0, 0, 0, 0);
-	outparms->rkey = (u32)outs[3];
-
-	return ret;
-}
-
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
-			    const struct ehca_mw *mw)
-{
-	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-				       adapter_handle.handle,    /* r4 */
-				       mw->ipz_mw_handle.handle, /* r5 */
-				       0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
-		      const u64 ressource_handle,
-		      void *rblock,
-		      unsigned long *byte_count)
-{
-	u64 r_cb = __pa(rblock);
-
-	if (r_cb & (EHCA_PAGESIZE-1)) {
-		ehca_gen_err("rblock not page aligned.");
-		return H_PARAMETER;
-	}
-
-	return ehca_plpar_hcall_norets(H_ERROR_DATA,
-				       adapter_handle.handle,
-				       ressource_handle,
-				       r_cb,
-				       0, 0, 0, 0);
-}
-
-u64 hipz_h_eoi(int irq)
-{
-	unsigned long xirr;
-
-	iosync();
-	xirr = (0xffULL << 24) | irq;
-
-	return plpar_hcall_norets(H_EOI, xirr);
-}
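
Both hcall wrappers in this file follow the same retry discipline: optionally serialize the call behind hcall_lock (the ehca_lock_hcalls firmware workaround), and when the hypervisor answers with a long-busy code, back off for the hinted interval and retry a bounded number of times before giving up with H_BUSY. A stand-alone sketch of that pattern, where do_hcall() is a hypothetical stand-in for plpar_hcall_norets()/plpar_hcall9():

#include <linux/delay.h>
#include <linux/spinlock.h>
#include <asm/hvcall.h>

static DEFINE_SPINLOCK(demo_hcall_lock);

/* hypothetical stand-in for the real firmware entry point */
extern long do_hcall(unsigned long opcode, unsigned long arg);

static long demo_hcall_retry(unsigned long opcode, unsigned long arg,
			     int serialize)
{
	unsigned long flags = 0;
	long ret;
	int i;

	for (i = 0; i < 5; i++) {
		if (serialize)
			spin_lock_irqsave(&demo_hcall_lock, flags);
		ret = do_hcall(opcode, arg);
		if (serialize)
			spin_unlock_irqrestore(&demo_hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			/* firmware encodes the suggested back-off in ret */
			msleep_interruptible(get_longbusy_msecs(ret));
			continue;
		}
		return ret;	/* H_SUCCESS or a hard error */
	}
	return H_BUSY;		/* still busy after five attempts */
}
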
diff --git a/drivers/staging/rdma/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
deleted file mode 100644
index a46e514..0000000
--- a/drivers/staging/rdma/ehca/hcp_if.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware Infiniband Interface code for POWER
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_IF_H__
-#define __HCP_IF_H__
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "hipz_hw.h"
-
-/*
- * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
- * resources, and creates the empty EQPT (ring).
- */
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_pfeq *pfeq,
-			     const u32 neq_control,
-			     const u32 number_of_entries,
-			     struct ipz_eq_handle *eq_handle,
-			     u32 * act_nr_of_entries,
-			     u32 * act_pages,
-			     u32 * eq_ist);
-
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
-		       struct ipz_eq_handle eq_handle,
-		       const u64 event_mask);
-/*
- * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
- * resources, and creates the empty CQPT (ring).
- */
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_cq *cq,
-			     struct ehca_alloc_cq_parms *param);
-
-
-/*
- * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
- * initializes resources, and creates empty QPPTs (2 rings).
- */
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_alloc_qp_parms *parms, int is_user);
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
-		      const u8 port_id,
-		      struct hipz_query_port *query_port_response_block);
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
-		       const u8 port_id, const u32 port_cap,
-		       const u8 init_type, const int modify_mask);
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
-		     struct hipz_query_hca *query_hca_rblock);
-
-/*
- * hipz_h_register_rpage is the common helper behind all
- * H_REGISTER_RPAGE hcall wrappers.
- */
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
-			  const u8 pagesize,
-			  const u8 queue_type,
-			  const u64 resource_handle,
-			  const u64 logical_address_of_page,
-			  u64 count);
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
-			     const struct ipz_eq_handle eq_handle,
-			     struct ehca_pfeq *pfeq,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count);
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle
-			   hcp_adapter_handle,
-			   u32 ist);
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
-			     const struct ipz_cq_handle cq_handle,
-			     struct ehca_pfcq *pfcq,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count,
-			     const struct h_galpa gal);
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
-			     const struct ipz_qp_handle qp_handle,
-			     struct ehca_pfqp *pfqp,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count,
-			     const struct h_galpa galpa);
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
-			       const struct ipz_qp_handle qp_handle,
-			       struct ehca_pfqp *pfqp,
-			       void **log_addr_next_sq_wqe_tb_processed,
-			       void **log_addr_next_rq_wqe_tb_processed,
-			       int dis_and_get_function_code);
-enum hcall_sigt {
-	HCALL_SIGT_NO_CQE = 0,
-	HCALL_SIGT_BY_WQE = 1,
-	HCALL_SIGT_EVERY = 2
-};
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
-		     const struct ipz_qp_handle qp_handle,
-		     struct ehca_pfqp *pfqp,
-		     const u64 update_mask,
-		     struct hcp_modify_qp_control_block *mqpcb,
-		     struct h_galpa gal);
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
-		    const struct ipz_qp_handle qp_handle,
-		    struct ehca_pfqp *pfqp,
-		    struct hcp_modify_qp_control_block *qqpcb,
-		    struct h_galpa gal);
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
-		      struct ehca_qp *qp);
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u32 port);
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u32 port, u32 * pma_qp_nr,
-		       u32 * bma_qp_nr);
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u16 mcg_dlid,
-		       u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
-		       const struct ipz_qp_handle qp_handle,
-		       struct h_galpa gal,
-		       u16 mcg_dlid,
-		       u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
-		      struct ehca_cq *cq,
-		      u8 force_flag);
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
-		      struct ehca_eq *eq);
-
-/*
- * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and initializes
- * resources.
- */
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
-			     const struct ehca_mr *mr,
-			     const u64 vaddr,
-			     const u64 length,
-			     const u32 access_ctrl,
-			     const struct ipz_pd pd,
-			     struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
-			     const struct ehca_mr *mr,
-			     const u8 pagesize,
-			     const u8 queue_type,
-			     const u64 logical_address_of_page,
-			     const u64 count);
-
-/* hipz_h_query_mr queries MR in HW and FW */
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
-		    const struct ehca_mr *mr,
-		    struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mr frees MR resources in HW and FW */
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
-			    const struct ehca_mr *mr);
-
-/* hipz_h_reregister_pmr reregisters MR in HW and FW */
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
-			  const struct ehca_mr *mr,
-			  const u64 vaddr_in,
-			  const u64 length,
-			  const u32 access_ctrl,
-			  const struct ipz_pd pd,
-			  const u64 mr_addr_cb,
-			  struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_smr registers a shared MR in HW and FW */
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
-			const struct ehca_mr *mr,
-			const struct ehca_mr *orig_mr,
-			const u64 vaddr_in,
-			const u32 access_ctrl,
-			const struct ipz_pd pd,
-			struct ehca_mr_hipzout_parms *outparms);
-
-/*
- * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and initializes
- * resources.
- */
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
-			     const struct ehca_mw *mw,
-			     const struct ipz_pd pd,
-			     struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_query_mw queries MW in HW and FW */
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
-		    const struct ehca_mw *mw,
-		    struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mw frees MW resources in HW and FW */
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
-			    const struct ehca_mw *mw);
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
-		      const u64 ressource_handle,
-		      void *rblock,
-		      unsigned long *byte_count);
-u64 hipz_h_eoi(int irq);
-
-#endif /* __HCP_IF_H__ */
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
deleted file mode 100644
index 077376f..0000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *   load store abstraction for ehca register access with tracing
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-u64 hcall_map_page(u64 physaddr)
-{
-	return (u64)ioremap(physaddr, EHCA_PAGESIZE);
-}
-
-int hcall_unmap_page(u64 mapaddr)
-{
-	iounmap((volatile void __iomem *) mapaddr);
-	return 0;
-}
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
-		    u64 paddr_kernel, u64 paddr_user)
-{
-	if (!is_user) {
-		galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
-		if (!galpas->kernel.fw_handle)
-			return -ENOMEM;
-	} else
-		galpas->kernel.fw_handle = 0;
-
-	galpas->user.fw_handle = paddr_user;
-
-	return 0;
-}
-
-int hcp_galpas_dtor(struct h_galpas *galpas)
-{
-	if (galpas->kernel.fw_handle) {
-		int ret = hcall_unmap_page(galpas->kernel.fw_handle);
-		if (ret)
-			return ret;
-	}
-
-	galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
-
-	return 0;
-}
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
deleted file mode 100644
index d1b0299..0000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware calls
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_PHYP_H__
-#define __HCP_PHYP_H__
-
-
-/*
- * eHCA page (mapped into memory)
- * resource to access eHCA register pages in CPU address space
-*/
-struct h_galpa {
-	u64 fw_handle;
-	/* for pSeries this is a 64bit memory address where
-	   I/O memory is mapped into CPU address space (kv) */
-};
-
-/*
- * resource to access eHCA address space registers, all types
- */
-struct h_galpas {
-	u32 pid;		/*PID of userspace galpa checking */
-	struct h_galpa user;	/* user space accessible resource,
-				   set to 0 if unused */
-	struct h_galpa kernel;	/* kernel space accessible resource,
-				   set to 0 if unused */
-};
-
-static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
-{
-	u64 addr = galpa.fw_handle + offset;
-	return *(volatile u64 __force *)addr;
-}
-
-static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
-{
-	u64 addr = galpa.fw_handle + offset;
-	*(volatile u64 __force *)addr = value;
-}
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
-		    u64 paddr_kernel, u64 paddr_user);
-
-int hcp_galpas_dtor(struct h_galpas *galpas);
-
-u64 hcall_map_page(u64 physaddr);
-
-int hcall_unmap_page(u64 mapaddr);
-
-#endif
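
Taken together with hcp_phyp.c above, the galpa abstraction amounts to ioremap()ing one eHCA register page and issuing 64-bit loads/stores at fixed offsets into it. A compressed sketch of the kernel-side life cycle, assuming reg_paddr is the register-page physical address returned by H_ALLOC_RESOURCE (outs[5]/outs[6] in hcp_if.c); demo_galpa_roundtrip() is illustrative only:

static int demo_galpa_roundtrip(u64 reg_paddr)
{
	struct h_galpas galpas;
	u64 hcr;

	/* kernel mapping only; the user handle stays an opaque value */
	if (hcp_galpas_ctor(&galpas, 0 /* !is_user */, reg_paddr, reg_paddr))
		return -ENOMEM;

	/* offset 0 is the *x_hcr register in all the *temm layouts */
	hcr = hipz_galpa_load(galpas.kernel, 0);
	hipz_galpa_store(galpas.kernel, 0, hcr);

	return hcp_galpas_dtor(&galpas);
}
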
diff --git a/drivers/staging/rdma/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
deleted file mode 100644
index 9dac93d..0000000
--- a/drivers/staging/rdma/ehca/hipz_fns.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HW abstraction register functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_H__
-#define __HIPZ_FNS_H__
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-#include "hipz_fns_core.h"
-
-#define hipz_galpa_store_eq(gal, offset, value) \
-	hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_eq(gal, offset) \
-	hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qped(gal, offset, value) \
-	hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_qped(gal, offset) \
-	hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
-
-#define hipz_galpa_store_mrmw(gal, offset, value) \
-	hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_mrmw(gal, offset) \
-	hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
-
-#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
deleted file mode 100644
index 868735f..0000000
--- a/drivers/staging/rdma/ehca/hipz_fns_core.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HW abstraction register functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_CORE_H__
-#define __HIPZ_FNS_CORE_H__
-
-#include "hcp_phyp.h"
-#include "hipz_hw.h"
-
-#define hipz_galpa_store_cq(gal, offset, value) \
-	hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_cq(gal, offset) \
-	hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qp(gal, offset, value) \
-	hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
-#define hipz_galpa_load_qp(gal, offset) \
-	hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
-
-static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
-{
-	/*  ringing doorbell :-) */
-	hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
-			    EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
-}
-
-static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
-{
-	/*  ringing doorbell :-) */
-	hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
-			    EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
-}
-
-static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
-{
-	hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
-			    EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
-}
-
-static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
-{
-	u64 cqx_n0_reg;
-
-	hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
-			    EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
-					   value));
-	cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
-}
-
-static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
-{
-	u64 cqx_n1_reg;
-
-	hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
-			    EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
-	cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
-}
-
-#endif /* __HIPZ_FNC_CORE_H__ */
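
The EHCA_BMASK_IBM()/EHCA_BMASK_SET()/EHCA_BMASK_GET() helpers used by these doorbell routines (and throughout hcp_if.c) live in ehca_tools.h, which is outside this hunk. The sketch below re-creates only the convention they encode, namely IBM bit numbering where bit 0 is the most significant bit of a 64-bit word; the field positions match the deleted code, but the macro bodies here are illustrative assumptions, not the original definitions:

#include <stdint.h>
#include <stdio.h>

/* IBM numbering: bit 0 = MSB, bit 63 = LSB of a 64-bit word */
#define DEMO_WIDTH(from, to)	((to) - (from) + 1)
#define DEMO_SHIFT(from, to)	(63 - (to))
#define DEMO_MASK(from, to)	((1ULL << DEMO_WIDTH(from, to)) - 1)	/* width < 64 */

#define DEMO_SET(from, to, val) \
	(((uint64_t)(val) & DEMO_MASK(from, to)) << DEMO_SHIFT(from, to))
#define DEMO_GET(from, to, reg) \
	(((uint64_t)(reg) >> DEMO_SHIFT(from, to)) & DEMO_MASK(from, to))

int main(void)
{
	/* QPX_SQADDER spans IBM bits 48..63, i.e. the low 16 bits */
	uint64_t doorbell = DEMO_SET(48, 63, 3);	/* post 3 send WQEs */

	printf("doorbell=%#llx adder=%llu\n",
	       (unsigned long long)doorbell,
	       (unsigned long long)DEMO_GET(48, 63, doorbell));
	return 0;
}
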
diff --git a/drivers/staging/rdma/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
deleted file mode 100644
index bf996c7..0000000
--- a/drivers/staging/rdma/ehca/hipz_hw.h
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  eHCA register definitions
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_HW_H__
-#define __HIPZ_HW_H__
-
-#include "ehca_tools.h"
-
-#define EHCA_MAX_MTU 4
-
-/* QP Table Entry Memory Map */
-struct hipz_qptemm {
-	u64 qpx_hcr;
-	u64 qpx_c;
-	u64 qpx_herr;
-	u64 qpx_aer;
-/* 0x20*/
-	u64 qpx_sqa;
-	u64 qpx_sqc;
-	u64 qpx_rqa;
-	u64 qpx_rqc;
-/* 0x40*/
-	u64 qpx_st;
-	u64 qpx_pmstate;
-	u64 qpx_pmfa;
-	u64 qpx_pkey;
-/* 0x60*/
-	u64 qpx_pkeya;
-	u64 qpx_pkeyb;
-	u64 qpx_pkeyc;
-	u64 qpx_pkeyd;
-/* 0x80*/
-	u64 qpx_qkey;
-	u64 qpx_dqp;
-	u64 qpx_dlidp;
-	u64 qpx_portp;
-/* 0xa0*/
-	u64 qpx_slidp;
-	u64 qpx_slidpp;
-	u64 qpx_dlida;
-	u64 qpx_porta;
-/* 0xc0*/
-	u64 qpx_slida;
-	u64 qpx_slidpa;
-	u64 qpx_slvl;
-	u64 qpx_ipd;
-/* 0xe0*/
-	u64 qpx_mtu;
-	u64 qpx_lato;
-	u64 qpx_rlimit;
-	u64 qpx_rnrlimit;
-/* 0x100*/
-	u64 qpx_t;
-	u64 qpx_sqhp;
-	u64 qpx_sqptp;
-	u64 qpx_nspsn;
-/* 0x120*/
-	u64 qpx_nspsnhwm;
-	u64 reserved1;
-	u64 qpx_sdsi;
-	u64 qpx_sdsbc;
-/* 0x140*/
-	u64 qpx_sqwsize;
-	u64 qpx_sqwts;
-	u64 qpx_lsn;
-	u64 qpx_nssn;
-/* 0x160 */
-	u64 qpx_mor;
-	u64 qpx_cor;
-	u64 qpx_sqsize;
-	u64 qpx_erc;
-/* 0x180*/
-	u64 qpx_rnrrc;
-	u64 qpx_ernrwt;
-	u64 qpx_rnrresp;
-	u64 qpx_lmsna;
-/* 0x1a0 */
-	u64 qpx_sqhpc;
-	u64 qpx_sqcptp;
-	u64 qpx_sigt;
-	u64 qpx_wqecnt;
-/* 0x1c0*/
-	u64 qpx_rqhp;
-	u64 qpx_rqptp;
-	u64 qpx_rqsize;
-	u64 qpx_nrr;
-/* 0x1e0*/
-	u64 qpx_rdmac;
-	u64 qpx_nrpsn;
-	u64 qpx_lapsn;
-	u64 qpx_lcr;
-/* 0x200*/
-	u64 qpx_rwc;
-	u64 qpx_rwva;
-	u64 qpx_rdsi;
-	u64 qpx_rdsbc;
-/* 0x220*/
-	u64 qpx_rqwsize;
-	u64 qpx_crmsn;
-	u64 qpx_rdd;
-	u64 qpx_larpsn;
-/* 0x240*/
-	u64 qpx_pd;
-	u64 qpx_scqn;
-	u64 qpx_rcqn;
-	u64 qpx_aeqn;
-/* 0x260*/
-	u64 qpx_aaelog;
-	u64 qpx_ram;
-	u64 qpx_rdmaqe0;
-	u64 qpx_rdmaqe1;
-/* 0x280*/
-	u64 qpx_rdmaqe2;
-	u64 qpx_rdmaqe3;
-	u64 qpx_nrpsnhwm;
-/* 0x298*/
-	u64 reserved[(0x400 - 0x298) / 8];
-/* 0x400 extended data */
-	u64 reserved_ext[(0x500 - 0x400) / 8];
-/* 0x500 */
-	u64 reserved2[(0x1000 - 0x500) / 8];
-/* 0x1000      */
-};
-
-#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
-
-#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
-
-/* MRMWPT Entry Memory Map */
-struct hipz_mrmwmm {
-	/* 0x00 */
-	u64 mrx_hcr;
-
-	u64 mrx_c;
-	u64 mrx_herr;
-	u64 mrx_aer;
-	/* 0x20 */
-	u64 mrx_pp;
-	u64 reserved1;
-	u64 reserved2;
-	u64 reserved3;
-	/* 0x40 */
-	u64 reserved4[(0x200 - 0x40) / 8];
-	/* 0x200 */
-	u64 mrx_ctl[64];
-
-};
-
-#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
-
-struct hipz_qpedmm {
-	/* 0x00 */
-	u64 reserved0[(0x400) / 8];
-	/* 0x400 */
-	u64 qpedx_phh;
-	u64 qpedx_ppsgp;
-	/* 0x410 */
-	u64 qpedx_ppsgu;
-	u64 qpedx_ppdgp;
-	/* 0x420 */
-	u64 qpedx_ppdgu;
-	u64 qpedx_aph;
-	/* 0x430 */
-	u64 qpedx_apsgp;
-	u64 qpedx_apsgu;
-	/* 0x440 */
-	u64 qpedx_apdgp;
-	u64 qpedx_apdgu;
-	/* 0x450 */
-	u64 qpedx_apav;
-	u64 qpedx_apsav;
-	/* 0x460  */
-	u64 qpedx_hcr;
-	u64 reserved1[4];
-	/* 0x488 */
-	u64 qpedx_rrl0;
-	/* 0x490 */
-	u64 qpedx_rrrkey0;
-	u64 qpedx_rrva0;
-	/* 0x4a0 */
-	u64 reserved2;
-	u64 qpedx_rrl1;
-	/* 0x4b0 */
-	u64 qpedx_rrrkey1;
-	u64 qpedx_rrva1;
-	/* 0x4c0 */
-	u64 reserved3;
-	u64 qpedx_rrl2;
-	/* 0x4d0 */
-	u64 qpedx_rrrkey2;
-	u64 qpedx_rrva2;
-	/* 0x4e0 */
-	u64 reserved4;
-	u64 qpedx_rrl3;
-	/* 0x4f0 */
-	u64 qpedx_rrrkey3;
-	u64 qpedx_rrva3;
-};
-
-#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
-
-/* CQ Table Entry Memory Map */
-struct hipz_cqtemm {
-	u64 cqx_hcr;
-	u64 cqx_c;
-	u64 cqx_herr;
-	u64 cqx_aer;
-/* 0x20  */
-	u64 cqx_ptp;
-	u64 cqx_tp;
-	u64 cqx_fec;
-	u64 cqx_feca;
-/* 0x40  */
-	u64 cqx_ep;
-	u64 cqx_eq;
-/* 0x50  */
-	u64 reserved1;
-	u64 cqx_n0;
-/* 0x60  */
-	u64 cqx_n1;
-	u64 reserved2[(0x1000 - 0x60) / 8];
-/* 0x1000 */
-};
-
-#define CQX_FEC_CQE_CNT           EHCA_BMASK_IBM(32, 63)
-#define CQX_FECADDER              EHCA_BMASK_IBM(32, 63)
-#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-
-#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
-
-/* EQ Table Entry Memory Map */
-struct hipz_eqtemm {
-	u64 eqx_hcr;
-	u64 eqx_c;
-
-	u64 eqx_herr;
-	u64 eqx_aer;
-/* 0x20 */
-	u64 eqx_ptp;
-	u64 eqx_tp;
-	u64 eqx_ssba;
-	u64 eqx_psba;
-
-/* 0x40 */
-	u64 eqx_cec;
-	u64 eqx_meql;
-	u64 eqx_xisbi;
-	u64 eqx_xisc;
-/* 0x60 */
-	u64 eqx_it;
-
-};
-
-#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
-
-/* access control defines for MR/MW */
-#define HIPZ_ACCESSCTRL_L_WRITE  0x00800000
-#define HIPZ_ACCESSCTRL_R_WRITE  0x00400000
-#define HIPZ_ACCESSCTRL_R_READ   0x00200000
-#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
-#define HIPZ_ACCESSCTRL_MW_BIND  0x00080000
-
-/* query hca response block */
-struct hipz_query_hca {
-	u32 cur_reliable_dg;
-	u32 cur_qp;
-	u32 cur_cq;
-	u32 cur_eq;
-	u32 cur_mr;
-	u32 cur_mw;
-	u32 cur_ee_context;
-	u32 cur_mcast_grp;
-	u32 cur_qp_attached_mcast_grp;
-	u32 reserved1;
-	u32 cur_ipv6_qp;
-	u32 cur_eth_qp;
-	u32 cur_hp_mr;
-	u32 reserved2[3];
-	u32 max_rd_domain;
-	u32 max_qp;
-	u32 max_cq;
-	u32 max_eq;
-	u32 max_mr;
-	u32 max_hp_mr;
-	u32 max_mw;
-	u32 max_mrwpte;
-	u32 max_special_mrwpte;
-	u32 max_rd_ee_context;
-	u32 max_mcast_grp;
-	u32 max_total_mcast_qp_attach;
-	u32 max_mcast_qp_attach;
-	u32 max_raw_ipv6_qp;
-	u32 max_raw_ethy_qp;
-	u32 internal_clock_frequency;
-	u32 max_pd;
-	u32 max_ah;
-	u32 max_cqe;
-	u32 max_wqes_wq;
-	u32 max_partitions;
-	u32 max_rr_ee_context;
-	u32 max_rr_qp;
-	u32 max_rr_hca;
-	u32 max_act_wqs_ee_context;
-	u32 max_act_wqs_qp;
-	u32 max_sge;
-	u32 max_sge_rd;
-	u32 memory_page_size_supported;
-	u64 max_mr_size;
-	u32 local_ca_ack_delay;
-	u32 num_ports;
-	u32 vendor_id;
-	u32 vendor_part_id;
-	u32 hw_ver;
-	u64 node_guid;
-	u64 hca_cap_indicators;
-	u32 data_counter_register_size;
-	u32 max_shared_rq;
-	u32 max_isns_eq;
-	u32 max_neq;
-} __attribute__ ((packed));
-
-#define HCA_CAP_AH_PORT_NR_CHECK      EHCA_BMASK_IBM( 0,  0)
-#define HCA_CAP_ATOMIC                EHCA_BMASK_IBM( 1,  1)
-#define HCA_CAP_AUTO_PATH_MIG         EHCA_BMASK_IBM( 2,  2)
-#define HCA_CAP_BAD_P_KEY_CTR         EHCA_BMASK_IBM( 3,  3)
-#define HCA_CAP_SQD_RTS_PORT_CHANGE   EHCA_BMASK_IBM( 4,  4)
-#define HCA_CAP_CUR_QP_STATE_MOD      EHCA_BMASK_IBM( 5,  5)
-#define HCA_CAP_INIT_TYPE             EHCA_BMASK_IBM( 6,  6)
-#define HCA_CAP_PORT_ACTIVE_EVENT     EHCA_BMASK_IBM( 7,  7)
-#define HCA_CAP_Q_KEY_VIOL_CTR        EHCA_BMASK_IBM( 8,  8)
-#define HCA_CAP_WQE_RESIZE            EHCA_BMASK_IBM( 9,  9)
-#define HCA_CAP_RAW_PACKET_MCAST      EHCA_BMASK_IBM(10, 10)
-#define HCA_CAP_SHUTDOWN_PORT         EHCA_BMASK_IBM(11, 11)
-#define HCA_CAP_RC_LL_QP              EHCA_BMASK_IBM(12, 12)
-#define HCA_CAP_SRQ                   EHCA_BMASK_IBM(13, 13)
-#define HCA_CAP_UD_LL_QP              EHCA_BMASK_IBM(16, 16)
-#define HCA_CAP_RESIZE_MR             EHCA_BMASK_IBM(17, 17)
-#define HCA_CAP_MINI_QP               EHCA_BMASK_IBM(18, 18)
-#define HCA_CAP_H_ALLOC_RES_SYNC      EHCA_BMASK_IBM(19, 19)
-
-/* query port response block */
-struct hipz_query_port {
-	u32 state;
-	u32 bad_pkey_cntr;
-	u32 lmc;
-	u32 lid;
-	u32 subnet_timeout;
-	u32 qkey_viol_cntr;
-	u32 sm_sl;
-	u32 sm_lid;
-	u32 capability_mask;
-	u32 init_type_reply;
-	u32 pkey_tbl_len;
-	u32 gid_tbl_len;
-	u64 gid_prefix;
-	u32 port_nr;
-	u16 pkey_entries[16];
-	u8  reserved1[32];
-	u32 trent_size;
-	u32 trbuf_size;
-	u64 max_msg_sz;
-	u32 max_mtu;
-	u32 vl_cap;
-	u32 phys_pstate;
-	u32 phys_state;
-	u32 phys_speed;
-	u32 phys_width;
-	u8  reserved2[1884];
-	u64 guid_entries[255];
-} __attribute__ ((packed));
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
deleted file mode 100644
index 7ffc748..0000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.c
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  internal queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ipz_pt_fn.h"
-#include "ehca_classes.h"
-
-#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
-
-struct kmem_cache *small_qp_cache;
-
-void *ipz_qpageit_get_inc(struct ipz_queue *queue)
-{
-	void *ret = ipz_qeit_get(queue);
-	queue->current_q_offset += queue->pagesize;
-	if (queue->current_q_offset > queue->queue_length) {
-		queue->current_q_offset -= queue->pagesize;
-		ret = NULL;
-	}
-	if (((u64)ret) % queue->pagesize) {
-		ehca_gen_err("ERROR!! not at PAGE-Boundary");
-		return NULL;
-	}
-	return ret;
-}
-
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
-{
-	void *ret = ipz_qeit_get(queue);
-	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
-
-	queue->current_q_offset += queue->qe_size;
-	if (queue->current_q_offset > last_entry_in_q) {
-		queue->current_q_offset = 0;
-		queue->toggle_state = (~queue->toggle_state) & 1;
-	}
-
-	return ret;
-}
-
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
-{
-	int i;
-	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
-		u64 page = __pa(queue->queue_pages[i]);
-		if (addr >= page && addr < page + queue->pagesize) {
-			*q_offset = addr - page + i * queue->pagesize;
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
-#if PAGE_SHIFT < EHCA_PAGESHIFT
-#error Kernel pages must be at least as large as eHCA pages (4K)!
-#endif
-
-/*
- * allocate pages for queue:
- * outer loop allocates whole kernel pages (page aligned) and
- * inner loop divides a kernel page into smaller hca queue pages
- */
-static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
-{
-	int k, f = 0;
-	u8 *kpage;
-
-	while (f < nr_of_pages) {
-		kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
-		if (!kpage)
-			goto out;
-
-		for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
-			queue->queue_pages[f] = (struct ipz_page *)kpage;
-			kpage += EHCA_PAGESIZE;
-			f++;
-		}
-	}
-	return 1;
-
-out:
-	for (f = 0; f < nr_of_pages && queue->queue_pages[f];
-	     f += PAGES_PER_KPAGE)
-		free_page((unsigned long)(queue->queue_pages)[f]);
-	return 0;
-}
-
-static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
-	int order = ilog2(queue->pagesize) - 9;
-	struct ipz_small_queue_page *page;
-	unsigned long bit;
-
-	mutex_lock(&pd->lock);
-
-	if (!list_empty(&pd->free[order]))
-		page = list_entry(pd->free[order].next,
-				  struct ipz_small_queue_page, list);
-	else {
-		page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
-		if (!page)
-			goto out;
-
-		page->page = get_zeroed_page(GFP_KERNEL);
-		if (!page->page) {
-			kmem_cache_free(small_qp_cache, page);
-			goto out;
-		}
-
-		list_add(&page->list, &pd->free[order]);
-	}
-
-	bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
-	__set_bit(bit, page->bitmap);
-	page->fill++;
-
-	if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
-		list_move(&page->list, &pd->full[order]);
-
-	mutex_unlock(&pd->lock);
-
-	queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
-	queue->small_page = page;
-	queue->offset = bit << (order + 9);
-	return 1;
-
-out:
-	ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
-	mutex_unlock(&pd->lock);
-	return 0;
-}
-
-static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
-	int order = ilog2(queue->pagesize) - 9;
-	struct ipz_small_queue_page *page = queue->small_page;
-	unsigned long bit;
-	int free_page = 0;
-
-	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
-		>> (order + 9);
-
-	mutex_lock(&pd->lock);
-
-	__clear_bit(bit, page->bitmap);
-	page->fill--;
-
-	if (page->fill == 0) {
-		list_del(&page->list);
-		free_page = 1;
-	}
-
-	if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
-		/* the page was full until we freed the chunk */
-		list_move_tail(&page->list, &pd->free[order]);
-
-	mutex_unlock(&pd->lock);
-
-	if (free_page) {
-		free_page(page->page);
-		kmem_cache_free(small_qp_cache, page);
-	}
-}
-
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
-		   const u32 nr_of_pages, const u32 pagesize,
-		   const u32 qe_size, const u32 nr_of_sg,
-		   int is_small)
-{
-	if (pagesize > PAGE_SIZE) {
-		ehca_gen_err("FATAL ERROR: pagesize=%x "
-			     "is greater than kernel page size", pagesize);
-		return 0;
-	}
-
-	/* init queue fields */
-	queue->queue_length = nr_of_pages * pagesize;
-	queue->pagesize = pagesize;
-	queue->qe_size = qe_size;
-	queue->act_nr_of_sg = nr_of_sg;
-	queue->current_q_offset = 0;
-	queue->toggle_state = 1;
-	queue->small_page = NULL;
-
-	/* allocate queue page pointers */
-	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
-				     GFP_KERNEL | __GFP_NOWARN);
-	if (!queue->queue_pages) {
-		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
-		if (!queue->queue_pages) {
-			ehca_gen_err("Couldn't allocate queue page list");
-			return 0;
-		}
-	}
-
-	/* allocate actual queue pages */
-	if (is_small) {
-		if (!alloc_small_queue_page(queue, pd))
-			goto ipz_queue_ctor_exit0;
-	} else
-		if (!alloc_queue_pages(queue, nr_of_pages))
-			goto ipz_queue_ctor_exit0;
-
-	return 1;
-
-ipz_queue_ctor_exit0:
-	ehca_gen_err("Couldn't alloc pages queue=%p "
-		 "nr_of_pages=%x",  queue, nr_of_pages);
-	kvfree(queue->queue_pages);
-
-	return 0;
-}
-
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
-{
-	int i, nr_pages;
-
-	if (!queue || !queue->queue_pages) {
-		ehca_gen_dbg("queue or queue_pages is NULL");
-		return 0;
-	}
-
-	if (queue->small_page)
-		free_small_queue_page(queue, pd);
-	else {
-		nr_pages = queue->queue_length / queue->pagesize;
-		for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
-			free_page((unsigned long)queue->queue_pages[i]);
-	}
-
-	kvfree(queue->queue_pages);
-
-	return 1;
-}
-
-int ehca_init_small_qp_cache(void)
-{
-	small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
-					   sizeof(struct ipz_small_queue_page),
-					   0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!small_qp_cache)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void ehca_cleanup_small_qp_cache(void)
-{
-	kmem_cache_destroy(small_qp_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
deleted file mode 100644
index a801274..0000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.h
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  internal queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __IPZ_PT_FN_H__
-#define __IPZ_PT_FN_H__
-
-#define EHCA_PAGESHIFT   12
-#define EHCA_PAGESIZE   4096UL
-#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
-#define EHCA_PT_ENTRIES 512UL
-
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-
-struct ehca_pd;
-struct ipz_small_queue_page;
-
-extern struct kmem_cache *small_qp_cache;
-
-/* struct generic ehca page */
-struct ipz_page {
-	u8 entries[EHCA_PAGESIZE];
-};
-
-#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
-
-struct ipz_small_queue_page {
-	unsigned long page;
-	unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
-	int fill;
-	void *mapped_addr;
-	u32 mmap_count;
-	struct list_head list;
-};
-
-/* struct generic queue in linux kernel virtual memory (kv) */
-struct ipz_queue {
-	u64 current_q_offset;	/* current queue entry */
-
-	struct ipz_page **queue_pages;	/* array of pages belonging to queue */
-	u32 qe_size;		/* queue entry size */
-	u32 act_nr_of_sg;
-	u32 queue_length;	/* queue length allocated in bytes */
-	u32 pagesize;
-	u32 toggle_state;	/* toggle flag - per page */
-	u32 offset; /* save offset within page for small_qp */
-	struct ipz_small_queue_page *small_page;
-};
-
-/*
- * return current Queue Entry for a certain q_offset
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
-{
-	struct ipz_page *current_page;
-	if (q_offset >= queue->queue_length)
-		return NULL;
-	current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
-	return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
-}
-
-/*
- * return current Queue Entry
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_get(struct ipz_queue *queue)
-{
-	return ipz_qeit_calc(queue, queue->current_q_offset);
-}
-
-/*
- * return current Queue Page, increment Queue Page iterator from
- * page to page in struct ipz_queue, last increment will return 0! and
- * NOT wrap
- * returns address (kv) of Queue Page
- * warning don't use in parallel with ipz_QE_get_inc()
- */
-void *ipz_qpageit_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
-{
-	void *ret = ipz_qeit_get(queue);
-	queue->current_q_offset += queue->qe_size;
-	if (queue->current_q_offset >= queue->queue_length) {
-		queue->current_q_offset = 0;
-		/* toggle the valid flag */
-		queue->toggle_state = (~queue->toggle_state) & 1;
-	}
-
-	return ret;
-}
-
-/*
- * return a bool indicating whether current Queue Entry is valid
- */
-static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
-{
-	struct ehca_cqe *cqe = ipz_qeit_get(queue);
-	return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
-}
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
-{
-	return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
-}
-
-/*
- * returns and resets Queue Entry iterator
- * returns address (kv) of first Queue Entry
- */
-static inline void *ipz_qeit_reset(struct ipz_queue *queue)
-{
-	queue->current_q_offset = 0;
-	return ipz_qeit_get(queue);
-}
-
-/*
- * return the q_offset corresponding to an absolute address
- */
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
-
-/*
- * return the next queue offset. don't modify the queue.
- */
-static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
-{
-	offset += queue->qe_size;
-	if (offset >= queue->queue_length) offset = 0;
-	return offset;
-}
-
-/* struct generic page table */
-struct ipz_pt {
-	u64 entries[EHCA_PT_ENTRIES];
-};
-
-/* struct page table for a queue, only to be used in pf */
-struct ipz_qpt {
-	/* queue page tables (kv), use u64 because we know the element length */
-	u64 *qpts;
-	u32 n_qpts;
-	u32 n_ptes;       /*  number of page table entries */
-	u64 *current_pte_addr;
-};
-
-/*
- * constructor for a ipz_queue_t, placement new for ipz_queue_t,
- * new for all dependent datastructors
- * all QP Tables are the same
- * flow:
- *    allocate+pin queue
- * see ipz_qpt_ctor()
- * returns true if ok, false if out of memory
- */
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
-		   const u32 nr_of_pages, const u32 pagesize,
-		   const u32 qe_size, const u32 nr_of_sg,
-		   int is_small);
-
-/*
- * destructor for a ipz_queue_t
- *  -# free queue
- *  see ipz_queue_ctor()
- *  returns true if ok, false if queue was NULL-ptr or free failed
- */
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
-
-/*
- * constructor for a ipz_qpt_t,
- * placement new for struct ipz_queue, new for all dependent datastructors
- * all QP Tables are the same,
- * flow:
- * -# allocate+pin queue
- * -# initialise ptcb
- * -# allocate+pin PTs
- * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
- * -# the ring must have room for exactly nr_of_PTEs
- * see ipz_qpt_ctor()
- */
-void ipz_qpt_ctor(struct ipz_qpt *qpt,
-		  const u32 nr_of_qes,
-		  const u32 pagesize,
-		  const u32 qe_size,
-		  const u8 lowbyte, const u8 toggle,
-		  u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- * fix EQ page problems
- */
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Event Queue Entry, increment Queue Entry iterator
- * by one step in struct ipz_queue if valid, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_queue_QPageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- */
-static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
-{
-	void *ret = ipz_qeit_get(queue);
-	u32 qe = *(u8 *)ret;
-	if ((qe >> 7) != (queue->toggle_state & 1))
-		return NULL;
-	ipz_qeit_eq_get_inc(queue); /* this is a good one */
-	return ret;
-}
-
-static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
-{
-	void *ret = ipz_qeit_get(queue);
-	u32 qe = *(u8 *)ret;
-	if ((qe >> 7) != (queue->toggle_state & 1))
-		return NULL;
-	return ret;
-}
-
-/* returns address (GX) of first queue entry */
-static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
-{
-	return be64_to_cpu(qpt->qpts[0]);
-}
-
-/* returns address (kv) of first page of queue page table */
-static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
-{
-	return qpt->qpts;
-}
-
-#endif				/* __IPZ_PT_FN_H__ */
diff --git a/drivers/staging/rdma/ipath/Kconfig b/drivers/staging/rdma/ipath/Kconfig
deleted file mode 100644
index 041ce06..0000000
--- a/drivers/staging/rdma/ipath/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config INFINIBAND_IPATH
-	tristate "QLogic HTX HCA support"
-	depends on 64BIT && NET && HT_IRQ
-	---help---
-	This is a driver for the deprecated QLogic Hyper-Transport
-	IB host channel adapter (model QHT7140),
-	including InfiniBand verbs support.  This driver allows these
-	devices to be used with both kernel upper level protocols such
-	as IP-over-InfiniBand as well as with userspace applications
-	(in conjunction with InfiniBand userspace access).
-	For QLogic PCIe QLE based cards, use the QIB driver instead.
-
-	If you have this hardware you will need to boot with PAT disabled
-	on your x86-64 systems; use the nopat kernel parameter.
-
-	Note that this driver will soon be removed entirely from the kernel.
diff --git a/drivers/staging/rdma/ipath/Makefile b/drivers/staging/rdma/ipath/Makefile
deleted file mode 100644
index 4496f28..0000000
--- a/drivers/staging/rdma/ipath/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-	-DIPATH_KERN_TYPE=0
-
-obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
-
-ib_ipath-y := \
-	ipath_cq.o \
-	ipath_diag.o \
-	ipath_dma.o \
-	ipath_driver.o \
-	ipath_eeprom.o \
-	ipath_file_ops.o \
-	ipath_fs.o \
-	ipath_init_chip.o \
-	ipath_intr.o \
-	ipath_keys.o \
-	ipath_mad.o \
-	ipath_mmap.o \
-	ipath_mr.o \
-	ipath_qp.o \
-	ipath_rc.o \
-	ipath_ruc.o \
-	ipath_sdma.o \
-	ipath_srq.o \
-	ipath_stats.o \
-	ipath_sysfs.o \
-	ipath_uc.o \
-	ipath_ud.o \
-	ipath_user_pages.o \
-	ipath_user_sdma.o \
-	ipath_verbs_mcast.o \
-	ipath_verbs.o
-
-ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-
-ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
-ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/staging/rdma/ipath/TODO b/drivers/staging/rdma/ipath/TODO
deleted file mode 100644
index cb00158..0000000
--- a/drivers/staging/rdma/ipath/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-The ipath driver has been moved to staging in preparation for its removal in a
-few releases. The driver will be deleted during the 4.6 merge window.
-
-Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
-Cc: linux-rdma@vger.kernel.org
diff --git a/drivers/staging/rdma/ipath/ipath_common.h b/drivers/staging/rdma/ipath/ipath_common.h
deleted file mode 100644
index 28cfe97..0000000
--- a/drivers/staging/rdma/ipath/ipath_common.h
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_COMMON_H
-#define _IPATH_COMMON_H
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-
-/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
-#define IPATH_SRC_OUI_1 0x00
-#define IPATH_SRC_OUI_2 0x11
-#define IPATH_SRC_OUI_3 0x75
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fastpath code
- * IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fastpath code
- * _IPATH_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- * _IPATH_DEBUGGING define as 0 if you want to remove debug prints
- */
-
-/*
- * The value in the BTH QP field that InfiniPath uses to differentiate
- * an infinipath protocol IB packet vs standard IB transport
- */
-#define IPATH_KD_QP 0x656b79
-
-/*
- * valid states passed to ipath_set_linkstate() user call
- */
-#define IPATH_IB_LINKDOWN		0
-#define IPATH_IB_LINKARM		1
-#define IPATH_IB_LINKACTIVE		2
-#define IPATH_IB_LINKDOWN_ONLY		3
-#define IPATH_IB_LINKDOWN_SLEEP		4
-#define IPATH_IB_LINKDOWN_DISABLE	5
-#define IPATH_IB_LINK_LOOPBACK	6 /* enable local loopback */
-#define IPATH_IB_LINK_EXTERNAL	7 /* normal, disable local loopback */
-#define IPATH_IB_LINK_NO_HRTBT	8 /* disable Heartbeat, e.g. for loopback */
-#define IPATH_IB_LINK_HRTBT	9 /* enable heartbeat, normal, non-loopback */
-
-/*
- * These 3 values (SDR and DDR may be ORed for auto-speed
- * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
- * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs.  They
- * are also the possible values for ipath_link_speed_enabled and active
- * The values were chosen to match values used within the IB spec.
- */
-#define IPATH_IB_SDR 1
-#define IPATH_IB_DDR 2
-
-/*
- * stats maintained by the driver.  For now, at least, this is global
- * to all minor devices.
- */
-struct infinipath_stats {
-	/* number of interrupts taken */
-	__u64 sps_ints;
-	/* number of interrupts for errors */
-	__u64 sps_errints;
-	/* number of errors from chip (not incl. packet errors or CRC) */
-	__u64 sps_errs;
-	/* number of packet errors from chip other than CRC */
-	__u64 sps_pkterrs;
-	/* number of packets with CRC errors (ICRC and VCRC) */
-	__u64 sps_crcerrs;
-	/* number of hardware errors reported (parity, etc.) */
-	__u64 sps_hwerrs;
-	/* number of times IB link changed state unexpectedly */
-	__u64 sps_iblink;
-	__u64 sps_unused; /* was fastrcvint, no longer implemented */
-	/* number of kernel (port0) packets received */
-	__u64 sps_port0pkts;
-	/* number of "ethernet" packets sent by driver */
-	__u64 sps_ether_spkts;
-	/* number of "ethernet" packets received by driver */
-	__u64 sps_ether_rpkts;
-	/* number of SMA packets sent by driver. Obsolete. */
-	__u64 sps_sma_spkts;
-	/* number of SMA packets received by driver. Obsolete. */
-	__u64 sps_sma_rpkts;
-	/* number of times all ports rcvhdrq was full and packet dropped */
-	__u64 sps_hdrqfull;
-	/* number of times all ports egrtid was full and packet dropped */
-	__u64 sps_etidfull;
-	/*
-	 * number of times we tried to send from driver, but no pio buffers
-	 * avail
-	 */
-	__u64 sps_nopiobufs;
-	/* number of ports currently open */
-	__u64 sps_ports;
-	/* list of pkeys (other than default) accepted (0 means not set) */
-	__u16 sps_pkeys[4];
-	__u16 sps_unused16[4]; /* available; maintaining compatible layout */
-	/* number of user ports per chip (not IB ports) */
-	__u32 sps_nports;
-	/* not our interrupt, or already handled */
-	__u32 sps_nullintr;
-	/* max number of packets handled per receive call */
-	__u32 sps_maxpkts_call;
-	/* avg number of packets handled per receive call */
-	__u32 sps_avgpkts_call;
-	/* total number of pages locked */
-	__u64 sps_pagelocks;
-	/* total number of pages unlocked */
-	__u64 sps_pageunlocks;
-	/*
-	 * Number of packets dropped in kernel other than errors (ether
-	 * packets if ipath not configured, etc.)
-	 */
-	__u64 sps_krdrops;
-	__u64 sps_txeparity; /* PIO buffer parity error, recovered */
-	/* pad for future growth */
-	__u64 __sps_pad[45];
-};
-
-/*
- * These are the status bits readable (in ascii form, 64bit value)
- * from the "status" sysfs file.
- */
-#define IPATH_STATUS_INITTED       0x1	/* basic initialization done */
-#define IPATH_STATUS_DISABLED      0x2	/* hardware disabled */
-/* Device has been disabled via admin request */
-#define IPATH_STATUS_ADMIN_DISABLED    0x4
-/* Chip has been found and initted */
-#define IPATH_STATUS_CHIP_PRESENT 0x20
-/* IB link is at ACTIVE, usable for data traffic */
-#define IPATH_STATUS_IB_READY     0x40
-/* link is configured, LID, MTU, etc. have been set */
-#define IPATH_STATUS_IB_CONF      0x80
-/* no link established, probably no cable */
-#define IPATH_STATUS_IB_NOCABLE  0x100
-/* A Fatal hardware error has occurred. */
-#define IPATH_STATUS_HWERROR     0x200
-
-/*
- * The list of usermode accessible registers.  Also see Reg_* later in file.
- */
-typedef enum _ipath_ureg {
-	/* (RO)  DMA RcvHdr to be used next. */
-	ur_rcvhdrtail = 0,
-	/* (RW)  RcvHdr entry to be processed next by host. */
-	ur_rcvhdrhead = 1,
-	/* (RO)  Index of next Eager index to use. */
-	ur_rcvegrindextail = 2,
-	/* (RW)  Eager TID to be processed next */
-	ur_rcvegrindexhead = 3,
-	/* For internal use only; max register number. */
-	_IPATH_UregMax
-} ipath_ureg;
-
-/* bit values for spi_runtime_flags */
-#define IPATH_RUNTIME_HT	0x1
-#define IPATH_RUNTIME_PCIE	0x2
-#define IPATH_RUNTIME_FORCE_WC_ORDER	0x4
-#define IPATH_RUNTIME_RCVHDR_COPY	0x8
-#define IPATH_RUNTIME_MASTER	0x10
-#define IPATH_RUNTIME_NODMA_RTAIL 0x80
-#define IPATH_RUNTIME_SDMA	      0x200
-#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
-#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
-
-/*
- * This structure is returned by ipath_userinit() immediately after
- * open to get implementation-specific info, and info specific to this
- * instance.
- *
- * This struct must have explicit pad fields where type sizes
- * may result in different alignments between 32 and 64 bit
- * programs, since the 64 bit kernel requires the user code
- * to have matching offsets.
- */
-struct ipath_base_info {
-	/* version of hardware, for feature checking. */
-	__u32 spi_hw_version;
-	/* version of software, for feature checking. */
-	__u32 spi_sw_version;
-	/* InfiniPath port assigned, goes into sent packets */
-	__u16 spi_port;
-	__u16 spi_subport;
-	/*
-	 * IB MTU, packets IB data must be less than this.
-	 * The MTU is in bytes, and will be a multiple of 4 bytes.
-	 */
-	__u32 spi_mtu;
-	/*
-	 * Size of a PIO buffer.  Any given packet's total size must be less
-	 * than this (in words).  Included is the starting control word, so
-	 * if 513 is returned, then total pkt size is 512 words or less.
-	 */
-	__u32 spi_piosize;
-	/* size of the TID cache in infinipath, in entries */
-	__u32 spi_tidcnt;
-	/* size of the TID Eager list in infinipath, in entries */
-	__u32 spi_tidegrcnt;
-	/* size of a single receive header queue entry in words. */
-	__u32 spi_rcvhdrent_size;
-	/*
-	 * Count of receive header queue entries allocated.
-	 * This may be less than the spu_rcvhdrcnt passed in!
-	 */
-	__u32 spi_rcvhdr_cnt;
-
-	/* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
-	__u32 spi_runtime_flags;
-
-	/* address where receive buffer queue is mapped into */
-	__u64 spi_rcvhdr_base;
-
-	/* user program. */
-
-	/* base address of eager TID receive buffers. */
-	__u64 spi_rcv_egrbufs;
-
-	/* Allocated by initialization code, not by protocol. */
-
-	/*
-	 * Size of each TID buffer in host memory, starting at
-	 * spi_rcv_egrbufs.  The buffers are virtually contiguous.
-	 */
-	__u32 spi_rcv_egrbufsize;
-	/*
-	 * The special QP (queue pair) value that identifies an infinipath
-	 * protocol packet from standard IB packets.  More, probably much
-	 * more, to be added.
-	 */
-	__u32 spi_qpair;
-
-	/*
-	 * User register base for init code, not to be used directly by
-	 * protocol or applications.
-	 */
-	__u64 __spi_uregbase;
-	/*
-	 * Maximum buffer size in bytes that can be used in a single TID
-	 * entry (assuming the buffer is aligned to this boundary).  This is
-	 * the minimum of what the hardware and software support.  Guaranteed
-	 * to be a power of 2.
-	 */
-	__u32 spi_tid_maxsize;
-	/*
-	 * alignment of each pio send buffer (byte count
-	 * to add to spi_piobufbase to get to second buffer)
-	 */
-	__u32 spi_pioalign;
-	/*
-	 * The index of the first pio buffer available to this process;
-	 * needed to do lookup in spi_pioavailaddr; not added to
-	 * spi_piobufbase.
-	 */
-	__u32 spi_pioindex;
-	 /* number of buffers mapped for this process */
-	__u32 spi_piocnt;
-
-	/*
-	 * Base address of writeonly pio buffers for this process.
-	 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
-	 * boundaries.  spi_piocnt buffers are mapped from this address
-	 */
-	__u64 spi_piobufbase;
-
-	/*
-	 * Base address of readonly memory copy of the pioavail registers.
-	 * There are 2 bits for each buffer.
-	 */
-	__u64 spi_pioavailaddr;
-
-	/*
-	 * Address where driver updates a copy of the interface and driver
-	 * status (IPATH_STATUS_*) as a 64 bit value.  It's followed by a
-	 * string indicating hardware error, if there was one.
-	 */
-	__u64 spi_status;
-
-	/* number of chip ports available to user processes */
-	__u32 spi_nports;
-	/* unit number of chip we are using */
-	__u32 spi_unit;
-	/* num bufs in each contiguous set */
-	__u32 spi_rcv_egrperchunk;
-	/* size in bytes of each contiguous set */
-	__u32 spi_rcv_egrchunksize;
-	/* total size of mmap to cover full rcvegrbuffers */
-	__u32 spi_rcv_egrbuftotlen;
-	__u32 spi_filler_for_align;
-	/* address of readonly memory copy of the rcvhdrq tail register. */
-	__u64 spi_rcvhdr_tailaddr;
-
-	/* shared memory pages for subports if port is shared */
-	__u64 spi_subport_uregbase;
-	__u64 spi_subport_rcvegrbuf;
-	__u64 spi_subport_rcvhdr_base;
-
-	/* shared memory page for hardware port if it is shared */
-	__u64 spi_port_uregbase;
-	__u64 spi_port_rcvegrbuf;
-	__u64 spi_port_rcvhdr_base;
-	__u64 spi_port_rcvhdr_tailaddr;
-
-} __attribute__ ((aligned(8)));
-
-
-/*
- * This version number is given to the driver by the user code during
- * initialization in the spu_userversion field of ipath_user_info, so
- * the driver can check for compatibility with user code.
- *
- * The major version changes when data structures
- * change in an incompatible way.  The driver must be the same or higher
- * for initialization to succeed.  In some cases, a higher version
- * driver will not interoperate with older software, and initialization
- * will return an error.
- */
-#define IPATH_USER_SWMAJOR 1
-
-/*
- * Minor version differences are always compatible
- * within a major version; however, if user software is larger
- * than driver software, some new features and/or structure fields
- * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference.
- */
-#define IPATH_USER_SWMINOR 6
-
-#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
-
-#define IPATH_KERN_TYPE 0
-
-/*
- * Similarly, this is the kernel version going back to the user.  It's
- * slightly different, in that we want to tell if the driver was built as
- * part of a QLogic release, or from the driver from openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of ipath_base_info, so the user code can in turn
- * check for compatibility with the kernel.
-*/
-#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
-
-/*
- * This structure is passed to ipath_userinit() to tell the driver where
- * user code buffers are, sizes, etc.   The offsets and sizes of the
- * fields must remain unchanged, for binary compatibility.  It can
- * be extended, if userversion is changed so user code can tell, if needed
- */
-struct ipath_user_info {
-	/*
-	 * version of user software, to detect compatibility issues.
-	 * Should be set to IPATH_USER_SWVERSION.
-	 */
-	__u32 spu_userversion;
-
-	/* desired number of receive header queue entries */
-	__u32 spu_rcvhdrcnt;
-
-	/* size of struct base_info to write to */
-	__u32 spu_base_info_size;
-
-	/*
-	 * number of words in KD protocol header
-	 * This tells InfiniPath how many words to copy to rcvhdrq.  If 0,
-	 * kernel uses a default.  Once set, attempts to set any other value
-	 * are an error (EAGAIN) until driver is reloaded.
-	 */
-	__u32 spu_rcvhdrsize;
-
-	/*
-	 * If two or more processes wish to share a port, each process
-	 * must set the spu_subport_cnt and spu_subport_id to the same
-	 * values.  The only restriction on the spu_subport_id is that
-	 * it be unique for a given node.
-	 */
-	__u16 spu_subport_cnt;
-	__u16 spu_subport_id;
-
-	__u32 spu_unused; /* kept for compatible layout */
-
-	/*
-	 * address of struct base_info to write to
-	 */
-	__u64 spu_base_info;
-
-} __attribute__ ((aligned(8)));
-
-/* User commands. */
-
-#define IPATH_CMD_MIN		16
-
-#define __IPATH_CMD_USER_INIT	16	/* old set up userspace (for old user code) */
-#define IPATH_CMD_PORT_INFO	17	/* find out what resources we got */
-#define IPATH_CMD_RECV_CTRL	18	/* control receipt of packets */
-#define IPATH_CMD_TID_UPDATE	19	/* update expected TID entries */
-#define IPATH_CMD_TID_FREE	20	/* free expected TID entries */
-#define IPATH_CMD_SET_PART_KEY	21	/* add partition key */
-#define __IPATH_CMD_SLAVE_INFO	22	/* return info on slave processes (for old user code) */
-#define IPATH_CMD_ASSIGN_PORT	23	/* allocate HCA and port */
-#define IPATH_CMD_USER_INIT 	24	/* set up userspace */
-#define IPATH_CMD_UNUSED_1	25
-#define IPATH_CMD_UNUSED_2	26
-#define IPATH_CMD_PIOAVAILUPD	27	/* force an update of PIOAvail reg */
-#define IPATH_CMD_POLL_TYPE	28	/* set the kind of polling we want */
-#define IPATH_CMD_ARMLAUNCH_CTRL	29 /* armlaunch detection control */
-/* 30 is unused */
-#define IPATH_CMD_SDMA_INFLIGHT 31	/* sdma inflight counter request */
-#define IPATH_CMD_SDMA_COMPLETE 32	/* sdma completion counter request */
-
-/*
- * Poll types
- */
-#define IPATH_POLL_TYPE_URGENT	 0x01
-#define IPATH_POLL_TYPE_OVERFLOW 0x02
-
-struct ipath_port_info {
-	__u32 num_active;	/* number of active units */
-	__u32 unit;		/* unit (chip) assigned to caller */
-	__u16 port;		/* port on unit assigned to caller */
-	__u16 subport;		/* subport on unit assigned to caller */
-	__u16 num_ports;	/* number of ports available on unit */
-	__u16 num_subports;	/* number of subports opened on port */
-};
-
-struct ipath_tid_info {
-	__u32 tidcnt;
-	/* make structure same size in 32 and 64 bit */
-	__u32 tid__unused;
-	/* virtual address of first page in transfer */
-	__u64 tidvaddr;
-	/* pointer (same size 32/64 bit) to __u16 tid array */
-	__u64 tidlist;
-
-	/*
-	 * pointer (same size 32/64 bit) to bitmap of TIDs used
-	 * for this call; checked for being large enough at open
-	 */
-	__u64 tidmap;
-};
-
-struct ipath_cmd {
-	__u32 type;			/* command type */
-	union {
-		struct ipath_tid_info tid_info;
-		struct ipath_user_info user_info;
-
-		/*
-		 * address in userspace where we should put the sdma
-		 * inflight counter
-		 */
-		__u64 sdma_inflight;
-		/*
-		 * address in userspace where we should put the sdma
-		 * completion counter
-		 */
-		__u64 sdma_complete;
-		/* address in userspace of struct ipath_port_info to
-		   write result to */
-		__u64 port_info;
-		/* enable/disable receipt of packets */
-		__u32 recv_ctrl;
-		/* enable/disable armlaunch errors (non-zero to enable) */
-		__u32 armlaunch_ctrl;
-		/* partition key to set */
-		__u16 part_key;
-		/* user address of __u32 bitmask of active slaves */
-		__u64 slave_mask_addr;
-		/* type of polling we want */
-		__u16 poll_type;
-	} cmd;
-};
-
-struct ipath_iovec {
-	/* Pointer to data, but same size 32 and 64 bit */
-	__u64 iov_base;
-
-	/*
-	 * Length of data; don't need 64 bits, but want
-	 * ipath_sendpkt to remain same size as before 32 bit changes, so...
-	 */
-	__u64 iov_len;
-};
-
-/*
- * Describes a single packet for send.  Each packet can have one or more
- * buffers, but the total length (exclusive of IB headers) must be less
- * than the MTU, and if using the PIO method, entire packet length,
- * including IB headers, must be less than the ipath_piosize value (words).
- * Use of this necessitates including sys/uio.h
- */
-struct __ipath_sendpkt {
-	__u32 sps_flags;	/* flags for packet (TBD) */
-	__u32 sps_cnt;		/* number of entries to use in sps_iov */
-	/* array of iov's describing packet. TEMPORARY */
-	struct ipath_iovec sps_iov[4];
-};
-
-/*
- * diagnostics can send a packet by "writing" one of the following
- * two structs to diag data special file
- * The first is the legacy version for backward compatibility
- */
-struct ipath_diag_pkt {
-	__u32 unit;
-	__u64 data;
-	__u32 len;
-};
-
-/* The second diag_pkt struct is the expanded version that allows
- * more control over the packet, specifically, by allowing a custom
- * pbc (+ static rate) qword, so that special modes and deliberate
- * changes to CRCs can be used. The elements were also re-ordered
- * for better alignment and to avoid padding issues.
- */
-struct ipath_diag_xpkt {
-	__u64 data;
-	__u64 pbc_wd;
-	__u32 unit;
-	__u32 len;
-};
-
-/*
- * Data layout in I2C flash (for GUID, etc.)
- * All fields are little-endian binary unless otherwise stated
- */
-#define IPATH_FLASH_VERSION 2
-struct ipath_flash {
-	/* flash layout version (IPATH_FLASH_VERSION) */
-	__u8 if_fversion;
-	/* checksum protecting if_length bytes */
-	__u8 if_csum;
-	/*
-	 * valid length (in use, protected by if_csum), including
-	 * if_fversion and if_csum themselves
-	 */
-	__u8 if_length;
-	/* the GUID, in network order */
-	__u8 if_guid[8];
-	/* number of GUIDs to use, starting from if_guid */
-	__u8 if_numguid;
-	/* the (last 10 characters of) board serial number, in ASCII */
-	char if_serial[12];
-	/* board mfg date (YYYYMMDD ASCII) */
-	char if_mfgdate[8];
-	/* last board rework/test date (YYYYMMDD ASCII) */
-	char if_testdate[8];
-	/* logging of error counts, TBD */
-	__u8 if_errcntp[4];
-	/* powered on hours, updated at driver unload */
-	__u8 if_powerhour[2];
-	/* ASCII free-form comment field */
-	char if_comment[32];
-	/* Backwards compatible prefix for longer QLogic Serial Numbers */
-	char if_sprefix[4];
-	/* 82 bytes used, min flash size is 128 bytes */
-	__u8 if_future[46];
-};
-
-/*
- * These are the counters implemented in the chip, and are listed in order.
- * The InterCaps naming is taken straight from the chip spec.
- */
-struct infinipath_counters {
-	__u64 LBIntCnt;
-	__u64 LBFlowStallCnt;
-	__u64 TxSDmaDescCnt;	/* was Reserved1 */
-	__u64 TxUnsupVLErrCnt;
-	__u64 TxDataPktCnt;
-	__u64 TxFlowPktCnt;
-	__u64 TxDwordCnt;
-	__u64 TxLenErrCnt;
-	__u64 TxMaxMinLenErrCnt;
-	__u64 TxUnderrunCnt;
-	__u64 TxFlowStallCnt;
-	__u64 TxDroppedPktCnt;
-	__u64 RxDroppedPktCnt;
-	__u64 RxDataPktCnt;
-	__u64 RxFlowPktCnt;
-	__u64 RxDwordCnt;
-	__u64 RxLenErrCnt;
-	__u64 RxMaxMinLenErrCnt;
-	__u64 RxICRCErrCnt;
-	__u64 RxVCRCErrCnt;
-	__u64 RxFlowCtrlErrCnt;
-	__u64 RxBadFormatCnt;
-	__u64 RxLinkProblemCnt;
-	__u64 RxEBPCnt;
-	__u64 RxLPCRCErrCnt;
-	__u64 RxBufOvflCnt;
-	__u64 RxTIDFullErrCnt;
-	__u64 RxTIDValidErrCnt;
-	__u64 RxPKeyMismatchCnt;
-	__u64 RxP0HdrEgrOvflCnt;
-	__u64 RxP1HdrEgrOvflCnt;
-	__u64 RxP2HdrEgrOvflCnt;
-	__u64 RxP3HdrEgrOvflCnt;
-	__u64 RxP4HdrEgrOvflCnt;
-	__u64 RxP5HdrEgrOvflCnt;
-	__u64 RxP6HdrEgrOvflCnt;
-	__u64 RxP7HdrEgrOvflCnt;
-	__u64 RxP8HdrEgrOvflCnt;
-	__u64 RxP9HdrEgrOvflCnt;	/* was Reserved6 */
-	__u64 RxP10HdrEgrOvflCnt;	/* was Reserved7 */
-	__u64 RxP11HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP12HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP13HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP14HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP15HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP16HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 IBStatusChangeCnt;
-	__u64 IBLinkErrRecoveryCnt;
-	__u64 IBLinkDownedCnt;
-	__u64 IBSymbolErrCnt;
-	/* The following are new for IBA7220 */
-	__u64 RxVL15DroppedPktCnt;
-	__u64 RxOtherLocalPhyErrCnt;
-	__u64 PcieRetryBufDiagQwordCnt;
-	__u64 ExcessBufferOvflCnt;
-	__u64 LocalLinkIntegrityErrCnt;
-	__u64 RxVlErrCnt;
-	__u64 RxDlidFltrCnt;
-};
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software
- * The other bits that are used only by the driver or diags are in
- * ipath_registers.h
- */
-
-/* RcvHdrFlags bits */
-#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
-#define INFINIPATH_RHF_LENGTH_SHIFT 0
-#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
-#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
-#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
-#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
-#define INFINIPATH_RHF_SEQ_MASK 0xF
-#define INFINIPATH_RHF_SEQ_SHIFT 0
-#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
-#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
-#define INFINIPATH_RHF_H_ICRCERR   0x80000000
-#define INFINIPATH_RHF_H_VCRCERR   0x40000000
-#define INFINIPATH_RHF_H_PARITYERR 0x20000000
-#define INFINIPATH_RHF_H_LENERR    0x10000000
-#define INFINIPATH_RHF_H_MTUERR    0x08000000
-#define INFINIPATH_RHF_H_IHDRERR   0x04000000
-#define INFINIPATH_RHF_H_TIDERR    0x02000000
-#define INFINIPATH_RHF_H_MKERR     0x01000000
-#define INFINIPATH_RHF_H_IBERR     0x00800000
-#define INFINIPATH_RHF_H_ERR_MASK  0xFF800000
-#define INFINIPATH_RHF_L_USE_EGR   0x80000000
-#define INFINIPATH_RHF_L_SWA       0x00008000
-#define INFINIPATH_RHF_L_SWB       0x00004000
-
-/* infinipath header fields */
-#define INFINIPATH_I_VERS_MASK 0xF
-#define INFINIPATH_I_VERS_SHIFT 28
-#define INFINIPATH_I_PORT_MASK 0xF
-#define INFINIPATH_I_PORT_SHIFT 24
-#define INFINIPATH_I_TID_MASK 0x7FF
-#define INFINIPATH_I_TID_SHIFT 13
-#define INFINIPATH_I_OFFSET_MASK 0x1FFF
-#define INFINIPATH_I_OFFSET_SHIFT 0
-
-/* K_PktFlags bits */
-#define INFINIPATH_KPF_INTR 0x1
-#define INFINIPATH_KPF_SUBPORT_MASK 0x3
-#define INFINIPATH_KPF_SUBPORT_SHIFT 1
-
-#define INFINIPATH_MAX_SUBPORT	4
-
-/* SendPIO per-buffer control */
-#define INFINIPATH_SP_TEST    0x40
-#define INFINIPATH_SP_TESTEBP 0x20
-#define INFINIPATH_SP_TRIGGER_SHIFT  15
-
-/* SendPIOAvail bits */
-#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
-#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
-
-/* infinipath header format */
-struct ipath_header {
-	/*
-	 * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
-	 * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
-	 * Port 4, TID 11, offset 13.
-	 */
-	__le32 ver_port_tid_offset;
-	__le16 chksum;
-	__le16 pkt_flags;
-};
-
-/* infinipath user message header format.
- * This structure contains the first 4 fields common to all protocols
- * that employ infinipath.
- */
-struct ipath_message_header {
-	__be16 lrh[4];
-	__be32 bth[3];
-	/* fields below this point are in host byte order */
-	struct ipath_header iph;
-	__u8 sub_opcode;
-};
-
-/* infinipath ethernet header format */
-struct ether_header {
-	__be16 lrh[4];
-	__be32 bth[3];
-	struct ipath_header iph;
-	__u8 sub_opcode;
-	__u8 cmd;
-	__be16 lid;
-	__u16 mac[3];
-	__u8 frag_num;
-	__u8 seq_num;
-	__le32 len;
-	/* MUST be of word size due to PIO write requirements */
-	__le32 csum;
-	__le16 csum_offset;
-	__le16 flags;
-	__u16 first_2_bytes;
-	__u8 unused[2];		/* currently unused */
-};
-
-
-/* IB - LRH header consts */
-#define IPATH_LRH_GRH 0x0003	/* 1. word of IB LRH - next header: GRH */
-#define IPATH_LRH_BTH 0x0002	/* 1. word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define IPATH_DEFAULT_P_KEY 0xFFFF
-#define IPATH_PERMISSIVE_LID 0xFFFF
-#define IPATH_AETH_CREDIT_SHIFT 24
-#define IPATH_AETH_CREDIT_MASK 0x1F
-#define IPATH_AETH_CREDIT_INVAL 0x1F
-#define IPATH_PSN_MASK 0xFFFFFF
-#define IPATH_MSN_MASK 0xFFFFFF
-#define IPATH_QPN_MASK 0xFFFFFF
-#define IPATH_MULTICAST_LID_BASE 0xC000
-#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
-#define IPATH_MULTICAST_QPN 0xFFFFFF
-
-/* Receive Header Queue: receive type (from infinipath) */
-#define RCVHQ_RCV_TYPE_EXPECTED  0
-#define RCVHQ_RCV_TYPE_EAGER     1
-#define RCVHQ_RCV_TYPE_NON_KD    2
-#define RCVHQ_RCV_TYPE_ERROR     3
-
-
-/* sub OpCodes - ith4x  */
-#define IPATH_ITH4X_OPCODE_ENCAP 0x81
-#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
-
-#define IPATH_HEADER_QUEUE_WORDS 9
-
-/* functions for extracting fields from rcvhdrq entries for the driver.
- */
-static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
-{
-	return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
-}
-
-static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
-{
-	return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
-	    & INFINIPATH_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
-{
-	return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
-		& INFINIPATH_RHF_LENGTH_MASK) << 2;
-}
-
-static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
-{
-	return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
-	    & INFINIPATH_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
-{
-	return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
-		& INFINIPATH_RHF_SEQ_MASK;
-}
-
-static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
-{
-	return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
-		& INFINIPATH_RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
-{
-	return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
-}
-
-static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
-{
-	return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
-	    & INFINIPATH_I_VERS_MASK;
-}
-
-#endif				/* _IPATH_COMMON_H */
diff --git a/drivers/staging/rdma/ipath/ipath_cq.c b/drivers/staging/rdma/ipath/ipath_cq.c
deleted file mode 100644
index e9dd911..0000000
--- a/drivers/staging/rdma/ipath/ipath_cq.c
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_cq_enter - add a new entry to the completion queue
- * @cq: completion queue
- * @entry: work completion entry to add
- * @solicited: true if @entry is a solicited entry
- *
- * This may be called with qp->s_lock held.
- */
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
-{
-	struct ipath_cq_wc *wc;
-	unsigned long flags;
-	u32 head;
-	u32 next;
-
-	spin_lock_irqsave(&cq->lock, flags);
-
-	/*
-	 * Note that the head pointer might be writable by user processes.
-	 * Take care to verify it is a sane value.
-	 */
-	wc = cq->queue;
-	head = wc->head;
-	if (head >= (unsigned) cq->ibcq.cqe) {
-		head = cq->ibcq.cqe;
-		next = 0;
-	} else
-		next = head + 1;
-	if (unlikely(next == wc->tail)) {
-		spin_unlock_irqrestore(&cq->lock, flags);
-		if (cq->ibcq.event_handler) {
-			struct ib_event ev;
-
-			ev.device = cq->ibcq.device;
-			ev.element.cq = &cq->ibcq;
-			ev.event = IB_EVENT_CQ_ERR;
-			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
-		}
-		return;
-	}
-	if (cq->ip) {
-		wc->uqueue[head].wr_id = entry->wr_id;
-		wc->uqueue[head].status = entry->status;
-		wc->uqueue[head].opcode = entry->opcode;
-		wc->uqueue[head].vendor_err = entry->vendor_err;
-		wc->uqueue[head].byte_len = entry->byte_len;
-		wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
-		wc->uqueue[head].qp_num = entry->qp->qp_num;
-		wc->uqueue[head].src_qp = entry->src_qp;
-		wc->uqueue[head].wc_flags = entry->wc_flags;
-		wc->uqueue[head].pkey_index = entry->pkey_index;
-		wc->uqueue[head].slid = entry->slid;
-		wc->uqueue[head].sl = entry->sl;
-		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
-		wc->uqueue[head].port_num = entry->port_num;
-		/* Make sure entry is written before the head index. */
-		smp_wmb();
-	} else
-		wc->kqueue[head] = *entry;
-	wc->head = next;
-
-	if (cq->notify == IB_CQ_NEXT_COMP ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
-		cq->notify = IB_CQ_NONE;
-		cq->triggered++;
-		/*
-		 * This will cause send_complete() to be called in
-		 * another thread.
-		 */
-		tasklet_hi_schedule(&cq->comptask);
-	}
-
-	spin_unlock_irqrestore(&cq->lock, flags);
-
-	if (entry->status != IB_WC_SUCCESS)
-		to_idev(cq->ibcq.device)->n_wqe_errs++;
-}
-
-/**
- * ipath_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context.  Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
-	struct ipath_cq *cq = to_icq(ibcq);
-	struct ipath_cq_wc *wc;
-	unsigned long flags;
-	int npolled;
-	u32 tail;
-
-	/* The kernel can only poll a kernel completion queue */
-	if (cq->ip) {
-		npolled = -EINVAL;
-		goto bail;
-	}
-
-	spin_lock_irqsave(&cq->lock, flags);
-
-	wc = cq->queue;
-	tail = wc->tail;
-	if (tail > (u32) cq->ibcq.cqe)
-		tail = (u32) cq->ibcq.cqe;
-	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
-		if (tail == wc->head)
-			break;
-		/* The kernel doesn't need a RMB since it has the lock. */
-		*entry = wc->kqueue[tail];
-		if (tail >= cq->ibcq.cqe)
-			tail = 0;
-		else
-			tail++;
-	}
-	wc->tail = tail;
-
-	spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
-	return npolled;
-}
-
-static void send_complete(unsigned long data)
-{
-	struct ipath_cq *cq = (struct ipath_cq *)data;
-
-	/*
-	 * The completion handler will most likely rearm the notification
-	 * and poll for all pending entries.  If a new completion entry
-	 * is added while we are in this routine, tasklet_hi_schedule()
-	 * won't call us again until we return so we check triggered to
-	 * see if we need to call the handler again.
-	 */
-	for (;;) {
-		u8 triggered = cq->triggered;
-
-		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
-
-		if (cq->triggered == triggered)
-			return;
-	}
-}
-
-/**
- * ipath_create_cq - create a completion queue
- * @ibdev: the device this completion queue is attached to
- * @attr: creation attributes
- * @context: unused by the InfiniPath driver
- * @udata: unused by the InfiniPath driver
- *
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
- * Called by ib_create_cq() in the generic verbs code.
- */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
-			      const struct ib_cq_init_attr *attr,
-			      struct ib_ucontext *context,
-			      struct ib_udata *udata)
-{
-	int entries = attr->cqe;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_cq *cq;
-	struct ipath_cq_wc *wc;
-	struct ib_cq *ret;
-	u32 sz;
-
-	if (attr->flags)
-		return ERR_PTR(-EINVAL);
-
-	if (entries < 1 || entries > ib_ipath_max_cqes) {
-		ret = ERR_PTR(-EINVAL);
-		goto done;
-	}
-
-	/* Allocate the completion queue structure. */
-	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
-	if (!cq) {
-		ret = ERR_PTR(-ENOMEM);
-		goto done;
-	}
-
-	/*
-	 * Allocate the completion queue entries and head/tail pointers.
-	 * This is allocated separately so that it can be resized and
-	 * also mapped into user space.
-	 * We need to use vmalloc() in order to support mmap and large
-	 * numbers of entries.
-	 */
-	sz = sizeof(*wc);
-	if (udata && udata->outlen >= sizeof(__u64))
-		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
-	else
-		sz += sizeof(struct ib_wc) * (entries + 1);
-	wc = vmalloc_user(sz);
-	if (!wc) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail_cq;
-	}
-
-	/*
-	 * Return the address of the WC as the offset to mmap.
-	 * See ipath_mmap() for details.
-	 */
-	if (udata && udata->outlen >= sizeof(__u64)) {
-		int err;
-
-		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
-		if (!cq->ip) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_wc;
-		}
-
-		err = ib_copy_to_udata(udata, &cq->ip->offset,
-				       sizeof(cq->ip->offset));
-		if (err) {
-			ret = ERR_PTR(err);
-			goto bail_ip;
-		}
-	} else
-		cq->ip = NULL;
-
-	spin_lock(&dev->n_cqs_lock);
-	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
-		spin_unlock(&dev->n_cqs_lock);
-		ret = ERR_PTR(-ENOMEM);
-		goto bail_ip;
-	}
-
-	dev->n_cqs_allocated++;
-	spin_unlock(&dev->n_cqs_lock);
-
-	if (cq->ip) {
-		spin_lock_irq(&dev->pending_lock);
-		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
-		spin_unlock_irq(&dev->pending_lock);
-	}
-
-	/*
-	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
-	 * The number of entries should be >= the number requested, or an
-	 * error is returned.
-	 */
-	cq->ibcq.cqe = entries;
-	cq->notify = IB_CQ_NONE;
-	cq->triggered = 0;
-	spin_lock_init(&cq->lock);
-	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
-	wc->head = 0;
-	wc->tail = 0;
-	cq->queue = wc;
-
-	ret = &cq->ibcq;
-
-	goto done;
-
-bail_ip:
-	kfree(cq->ip);
-bail_wc:
-	vfree(wc);
-bail_cq:
-	kfree(cq);
-done:
-	return ret;
-}
-
-/**
- * ipath_destroy_cq - destroy a completion queue
- * @ibcq: the completion queue to destroy.
- *
- * Returns 0 for success.
- *
- * Called by ib_destroy_cq() in the generic verbs code.
- */
-int ipath_destroy_cq(struct ib_cq *ibcq)
-{
-	struct ipath_ibdev *dev = to_idev(ibcq->device);
-	struct ipath_cq *cq = to_icq(ibcq);
-
-	tasklet_kill(&cq->comptask);
-	spin_lock(&dev->n_cqs_lock);
-	dev->n_cqs_allocated--;
-	spin_unlock(&dev->n_cqs_lock);
-	if (cq->ip)
-		kref_put(&cq->ip->ref, ipath_release_mmap_info);
-	else
-		vfree(cq->queue);
-	kfree(cq);
-
-	return 0;
-}
-
-/**
- * ipath_req_notify_cq - change the notification type for a completion queue
- * @ibcq: the completion queue
- * @notify_flags: the type of notification to request
- *
- * Returns 0 for success.
- *
- * This may be called from interrupt context.  Also called by
- * ib_req_notify_cq() in the generic verbs code.
- */
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
-	struct ipath_cq *cq = to_icq(ibcq);
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&cq->lock, flags);
-	/*
-	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
-	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
-	 */
-	if (cq->notify != IB_CQ_NEXT_COMP)
-		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
-
-	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
-	    cq->queue->head != cq->queue->tail)
-		ret = 1;
-
-	spin_unlock_irqrestore(&cq->lock, flags);
-
-	return ret;
-}
-
-/**
- * ipath_resize_cq - change the size of the CQ
- * @ibcq: the completion queue
- *
- * Returns 0 for success.
- */
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
-{
-	struct ipath_cq *cq = to_icq(ibcq);
-	struct ipath_cq_wc *old_wc;
-	struct ipath_cq_wc *wc;
-	u32 head, tail, n;
-	int ret;
-	u32 sz;
-
-	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/*
-	 * Need to use vmalloc() if we want to support large #s of entries.
-	 */
-	sz = sizeof(*wc);
-	if (udata && udata->outlen >= sizeof(__u64))
-		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
-	else
-		sz += sizeof(struct ib_wc) * (cqe + 1);
-	wc = vmalloc_user(sz);
-	if (!wc) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	/* Check that we can write the offset to mmap. */
-	if (udata && udata->outlen >= sizeof(__u64)) {
-		__u64 offset = 0;
-
-		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (ret)
-			goto bail_free;
-	}
-
-	spin_lock_irq(&cq->lock);
-	/*
-	 * Make sure head and tail are sane since they
-	 * might be user writable.
-	 */
-	old_wc = cq->queue;
-	head = old_wc->head;
-	if (head > (u32) cq->ibcq.cqe)
-		head = (u32) cq->ibcq.cqe;
-	tail = old_wc->tail;
-	if (tail > (u32) cq->ibcq.cqe)
-		tail = (u32) cq->ibcq.cqe;
-	if (head < tail)
-		n = cq->ibcq.cqe + 1 + head - tail;
-	else
-		n = head - tail;
-	if (unlikely((u32)cqe < n)) {
-		ret = -EINVAL;
-		goto bail_unlock;
-	}
-	for (n = 0; tail != head; n++) {
-		if (cq->ip)
-			wc->uqueue[n] = old_wc->uqueue[tail];
-		else
-			wc->kqueue[n] = old_wc->kqueue[tail];
-		if (tail == (u32) cq->ibcq.cqe)
-			tail = 0;
-		else
-			tail++;
-	}
-	cq->ibcq.cqe = cqe;
-	wc->head = n;
-	wc->tail = 0;
-	cq->queue = wc;
-	spin_unlock_irq(&cq->lock);
-
-	vfree(old_wc);
-
-	if (cq->ip) {
-		struct ipath_ibdev *dev = to_idev(ibcq->device);
-		struct ipath_mmap_info *ip = cq->ip;
-
-		ipath_update_mmap_info(dev, ip, sz, wc);
-
-		/*
-		 * Return the offset to mmap.
-		 * See ipath_mmap() for details.
-		 */
-		if (udata && udata->outlen >= sizeof(__u64)) {
-			ret = ib_copy_to_udata(udata, &ip->offset,
-					       sizeof(ip->offset));
-			if (ret)
-				goto bail;
-		}
-
-		spin_lock_irq(&dev->pending_lock);
-		if (list_empty(&ip->pending_mmaps))
-			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
-		spin_unlock_irq(&dev->pending_lock);
-	}
-
-	ret = 0;
-	goto bail;
-
-bail_unlock:
-	spin_unlock_irq(&cq->lock);
-bail_free:
-	vfree(wc);
-bail:
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_debug.h b/drivers/staging/rdma/ipath/ipath_debug.h
deleted file mode 100644
index 65926cd..0000000
--- a/drivers/staging/rdma/ipath/ipath_debug.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_DEBUG_H
-#define _IPATH_DEBUG_H
-
-#ifndef _IPATH_DEBUGGING	/* debugging enabled or not */
-#define _IPATH_DEBUGGING 1
-#endif
-
-#if _IPATH_DEBUGGING
-
-/*
- * Mask values for debugging.  The scheme allows us to compile out any
- * of the debug tracing stuff, and if compiled in, to enable or disable
- * dynamically.  This can be set at modprobe time also:
- *      modprobe infinipath.ko infinipath_debug=7
- */
-
-#define __IPATH_INFO        0x1	/* generic low verbosity stuff */
-#define __IPATH_DBG         0x2	/* generic debug */
-#define __IPATH_TRSAMPLE    0x8	/* generate trace buffer sample entries */
-/* leave some low verbosity spots open */
-#define __IPATH_VERBDBG     0x40	/* very verbose debug */
-#define __IPATH_PKTDBG      0x80	/* print packet data */
-/* print process startup (init)/exit messages */
-#define __IPATH_PROCDBG     0x100
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG       0x200
-#define __IPATH_ERRPKTDBG   0x400
-#define __IPATH_USER_SEND   0x1000	/* use user mode send */
-#define __IPATH_KERNEL_SEND 0x2000	/* use kernel mode send */
-#define __IPATH_EPKTDBG     0x4000	/* print ethernet packet data */
-#define __IPATH_IPATHDBG    0x10000	/* Ethernet (IPATH) gen debug */
-#define __IPATH_IPATHWARN   0x20000	/* Ethernet (IPATH) warnings */
-#define __IPATH_IPATHERR    0x40000	/* Ethernet (IPATH) errors */
-#define __IPATH_IPATHPD     0x80000	/* Ethernet (IPATH) packet dump */
-#define __IPATH_IPATHTABLE  0x100000	/* Ethernet (IPATH) table dump */
-#define __IPATH_LINKVERBDBG 0x200000	/* very verbose linkchange debug */
-
-#else				/* _IPATH_DEBUGGING */
-
-/*
- * define all of these even with debugging off, for the few places that do
- * if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
- * compiler eliminate the code
- */
-
-#define __IPATH_INFO      0x0	/* generic low verbosity stuff */
-#define __IPATH_DBG       0x0	/* generic debug */
-#define __IPATH_TRSAMPLE  0x0	/* generate trace buffer sample entries */
-#define __IPATH_VERBDBG   0x0	/* very verbose debug */
-#define __IPATH_PKTDBG    0x0	/* print packet data */
-#define __IPATH_PROCDBG   0x0	/* process startup (init)/exit messages */
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG     0x0
-#define __IPATH_EPKTDBG   0x0	/* print ethernet packet data */
-#define __IPATH_IPATHDBG  0x0	/* Ethernet (IPATH) gen debug on */
-#define __IPATH_IPATHWARN 0x0	/* Ethernet (IPATH) warnings on   */
-#define __IPATH_IPATHERR  0x0	/* Ethernet (IPATH) errors on   */
-#define __IPATH_IPATHPD   0x0	/* Ethernet (IPATH) packet dump on   */
-#define __IPATH_IPATHTABLE 0x0	/* Ethernet (IPATH) table dump on   */
-#define __IPATH_LINKVERBDBG 0x0	/* very verbose linkchange debug */
-
-#endif				/* _IPATH_DEBUGGING */
-
-#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
-
-#endif				/* _IPATH_DEBUG_H */
diff --git a/drivers/staging/rdma/ipath/ipath_diag.c b/drivers/staging/rdma/ipath/ipath_diag.c
deleted file mode 100644
index 45802e9..0000000
--- a/drivers/staging/rdma/ipath/ipath_diag.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains support for diagnostic functions.  It is accessed by
- * opening the ipath_diag device, normally minor number 129.  Diagnostic use
- * of the InfiniPath chip may render the chip or board unusable until the
- * driver is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/export.h>
-#include <asm/uaccess.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-int ipath_diag_inuse;
-static int diag_set_link;
-
-static int ipath_diag_open(struct inode *in, struct file *fp);
-static int ipath_diag_release(struct inode *in, struct file *fp);
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
-			       size_t count, loff_t *off);
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
-				size_t count, loff_t *off);
-
-static const struct file_operations diag_file_ops = {
-	.owner = THIS_MODULE,
-	.write = ipath_diag_write,
-	.read = ipath_diag_read,
-	.open = ipath_diag_open,
-	.release = ipath_diag_release,
-	.llseek = default_llseek,
-};
-
-static ssize_t ipath_diagpkt_write(struct file *fp,
-				   const char __user *data,
-				   size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
-	.owner = THIS_MODULE,
-	.write = ipath_diagpkt_write,
-	.llseek = noop_llseek,
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev *diagpkt_cdev;
-static struct device *diagpkt_dev;
-
-int ipath_diag_add(struct ipath_devdata *dd)
-{
-	char name[16];
-	int ret = 0;
-
-	if (atomic_inc_return(&diagpkt_count) == 1) {
-		ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
-				      "ipath_diagpkt", &diagpkt_file_ops,
-				      &diagpkt_cdev, &diagpkt_dev);
-
-		if (ret) {
-			ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
-				      "device: %d", ret);
-			goto done;
-		}
-	}
-
-	snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
-
-	ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
-			      &diag_file_ops, &dd->diag_cdev,
-			      &dd->diag_dev);
-	if (ret)
-		ipath_dev_err(dd, "Couldn't create %s device: %d",
-			      name, ret);
-
-done:
-	return ret;
-}
-
-void ipath_diag_remove(struct ipath_devdata *dd)
-{
-	if (atomic_dec_and_test(&diagpkt_count))
-		ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_dev);
-
-	ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_dev);
-}
-
-/**
- * ipath_read_umem64 - read a 64-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy (multiple of 32 bits)
- *
- * This function also localizes all chip memory accesses.
- * The copy should be written such that we read full cacheline packets
- * from the chip.  This is usually used for a single qword
- *
- * NOTE:  This assumes the chip address is 64-bit aligned.
- */
-static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
-			     const void __iomem *caddr, size_t count)
-{
-	const u64 __iomem *reg_addr = caddr;
-	const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
-	int ret;
-
-	/* not very efficient, but it works for now */
-	if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	while (reg_addr < reg_end) {
-		u64 data = readq(reg_addr);
-		if (copy_to_user(uaddr, &data, sizeof(u64))) {
-			ret = -EFAULT;
-			goto bail;
-		}
-		reg_addr++;
-		uaddr += sizeof(u64);
-	}
-	ret = 0;
-bail:
-	return ret;
-}
-
-/**
- * ipath_write_umem64 - write a 64-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: the number of bytes to copy (multiple of 32 bits)
- *
- * This is usually used for a single qword
- * NOTE:  This assumes the chip address is 64-bit aligned.
- */
-
-static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
-			      const void __user *uaddr, size_t count)
-{
-	u64 __iomem *reg_addr = caddr;
-	const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
-	int ret;
-
-	/* not very efficient, but it works for now */
-	if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	while (reg_addr < reg_end) {
-		u64 data;
-		if (copy_from_user(&data, uaddr, sizeof(data))) {
-			ret = -EFAULT;
-			goto bail;
-		}
-		writeq(data, reg_addr);
-
-		reg_addr++;
-		uaddr += sizeof(u64);
-	}
-	ret = 0;
-bail:
-	return ret;
-}
-
-/**
- * ipath_read_umem32 - read a 32-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy
- *
- * read 32 bit values, not 64 bit; for memories that only
- * support 32 bit reads; usually a single dword.
- */
-static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
-			     const void __iomem *caddr, size_t count)
-{
-	const u32 __iomem *reg_addr = caddr;
-	const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
-	int ret;
-
-	if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
-	    reg_end > (u32 __iomem *) dd->ipath_kregend) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	/* not very efficient, but it works for now */
-	while (reg_addr < reg_end) {
-		u32 data = readl(reg_addr);
-		if (copy_to_user(uaddr, &data, sizeof(data))) {
-			ret = -EFAULT;
-			goto bail;
-		}
-
-		reg_addr++;
-		uaddr += sizeof(u32);
-
-	}
-	ret = 0;
-bail:
-	return ret;
-}
-
-/**
- * ipath_write_umem32 - write a 32-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: number of bytes to copy
- *
- * write 32 bit values, not 64 bit; for memories that only
- * support 32 bit write; usually a single dword.
- */
-
-static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
-			      const void __user *uaddr, size_t count)
-{
-	u32 __iomem *reg_addr = caddr;
-	const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
-	int ret;
-
-	if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
-	    reg_end > (u32 __iomem *) dd->ipath_kregend) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	while (reg_addr < reg_end) {
-		u32 data;
-		if (copy_from_user(&data, uaddr, sizeof(data))) {
-			ret = -EFAULT;
-			goto bail;
-		}
-		writel(data, reg_addr);
-
-		reg_addr++;
-		uaddr += sizeof(u32);
-	}
-	ret = 0;
-bail:
-	return ret;
-}
-
-static int ipath_diag_open(struct inode *in, struct file *fp)
-{
-	int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
-	struct ipath_devdata *dd;
-	int ret;
-
-	mutex_lock(&ipath_mutex);
-
-	if (ipath_diag_inuse) {
-		ret = -EBUSY;
-		goto bail;
-	}
-
-	dd = ipath_lookup(unit);
-
-	if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
-	    !dd->ipath_kregbase) {
-		ret = -ENODEV;
-		goto bail;
-	}
-
-	fp->private_data = dd;
-	ipath_diag_inuse = -2;
-	diag_set_link = 0;
-	ret = 0;
-
-	/* Only expose a way to reset the device if we
-	   make it into diag mode. */
-	ipath_expose_reset(&dd->pcidev->dev);
-
-bail:
-	mutex_unlock(&ipath_mutex);
-
-	return ret;
-}
-
-/**
- * ipath_diagpkt_write - write an IB packet
- * @fp: the diag data device file pointer
- * @data: ipath_diag_pkt structure saying where to get the packet
- * @count: size of data to write
- * @off: unused by this code
- */
-static ssize_t ipath_diagpkt_write(struct file *fp,
-				   const char __user *data,
-				   size_t count, loff_t *off)
-{
-	u32 __iomem *piobuf;
-	u32 plen, pbufn, maxlen_reserve;
-	struct ipath_diag_pkt odp;
-	struct ipath_diag_xpkt dp;
-	u32 *tmpbuf = NULL;
-	struct ipath_devdata *dd;
-	ssize_t ret = 0;
-	u64 val;
-	u32 l_state, lt_state; /* LinkState, LinkTrainingState */
-
-
-	if (count == sizeof(dp)) {
-		if (copy_from_user(&dp, data, sizeof(dp))) {
-			ret = -EFAULT;
-			goto bail;
-		}
-	} else if (count == sizeof(odp)) {
-		if (copy_from_user(&odp, data, sizeof(odp))) {
-			ret = -EFAULT;
-			goto bail;
-		}
-		dp.len = odp.len;
-		dp.unit = odp.unit;
-		dp.data = odp.data;
-		dp.pbc_wd = 0;
-	} else {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* send count must be an exact number of dwords */
-	if (dp.len & 3) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	plen = dp.len >> 2;
-
-	dd = ipath_lookup(dp.unit);
-	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
-	    !dd->ipath_kregbase) {
-		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
-			   dp.unit);
-		ret = -ENODEV;
-		goto bail;
-	}
-
-	if (ipath_diag_inuse && !diag_set_link &&
-	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-		diag_set_link = 1;
-		ipath_cdbg(VERBOSE, "Trying to set link active for "
-			   "diag pkt\n");
-		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
-		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-	}
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
-		ret = -ENODEV;
-		goto bail;
-	}
-	/*
-	 * Want to skip check for l_state if using custom PBC,
-	 * because we might be trying to force an SM packet out.
-	 * first-cut, skip _all_ state checking in that case.
-	 */
-	val = ipath_ib_state(dd, dd->ipath_lastibcstat);
-	lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
-	l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
-	if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
-	    (val != dd->ib_init && val != dd->ib_arm &&
-	    val != dd->ib_active))) {
-		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
-			   dd->ipath_unit, (unsigned long long) val);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/*
-	 * need total length before first word written, plus 2 Dwords. One Dword
-	 * is for padding so we get the full user data when not aligned on
-	 * a word boundary. The other Dword is to make sure we have room for the
-	 * ICRC which gets tacked on later.
-	 */
-	maxlen_reserve = 2 * sizeof(u32);
-	if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
-		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
-			  dp.len, dd->ipath_ibmaxlen);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	plen = sizeof(u32) + dp.len;
-
-	tmpbuf = vmalloc(plen);
-	if (!tmpbuf) {
-		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
-			 "failing\n");
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	if (copy_from_user(tmpbuf,
-			   (const void __user *) (unsigned long) dp.data,
-			   dp.len)) {
-		ret = -EFAULT;
-		goto bail;
-	}
-
-	plen >>= 2;		/* in dwords */
-
-	piobuf = ipath_getpiobuf(dd, plen, &pbufn);
-	if (!piobuf) {
-		ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
-			   dd->ipath_unit);
-		ret = -EBUSY;
-		goto bail;
-	}
-	/* disarm it just to be extra sure */
-	ipath_disarm_piobufs(dd, pbufn, 1);
-
-	if (ipath_debug & __IPATH_PKTDBG)
-		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
-			   dd->ipath_unit, plen - 1, pbufn);
-
-	if (dp.pbc_wd == 0)
-		dp.pbc_wd = plen;
-	writeq(dp.pbc_wd, piobuf);
-	/*
-	 * Copy all but the trigger word, then flush, so it's written
-	 * to chip before trigger word, then write trigger word, then
-	 * flush again, so packet is sent.
-	 */
-	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
-		ipath_flush_wc();
-		__iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
-		ipath_flush_wc();
-		__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
-	} else
-		__iowrite32_copy(piobuf + 2, tmpbuf, plen);
-
-	ipath_flush_wc();
-
-	ret = sizeof(dp);
-
-bail:
-	vfree(tmpbuf);
-	return ret;
-}
-
-static int ipath_diag_release(struct inode *in, struct file *fp)
-{
-	mutex_lock(&ipath_mutex);
-	ipath_diag_inuse = 0;
-	fp->private_data = NULL;
-	mutex_unlock(&ipath_mutex);
-	return 0;
-}
-
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
-			       size_t count, loff_t *off)
-{
-	struct ipath_devdata *dd = fp->private_data;
-	void __iomem *kreg_base;
-	ssize_t ret;
-
-	kreg_base = dd->ipath_kregbase;
-
-	if (count == 0)
-		ret = 0;
-	else if ((count % 4) || (*off % 4))
-		/* address or length is not 32-bit aligned, hence invalid */
-		ret = -EINVAL;
-	else if (ipath_diag_inuse < 1 && (*off || count != 8))
-		ret = -EINVAL;  /* prevent cat /dev/ipath_diag* */
-	else if ((count % 8) || (*off % 8))
-		/* address or length not 64-bit aligned; do 32-bit reads */
-		ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
-	else
-		ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
-
-	if (ret >= 0) {
-		*off += count;
-		ret = count;
-		if (ipath_diag_inuse == -2)
-			ipath_diag_inuse++;
-	}
-
-	return ret;
-}
-
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
-				size_t count, loff_t *off)
-{
-	struct ipath_devdata *dd = fp->private_data;
-	void __iomem *kreg_base;
-	ssize_t ret;
-
-	kreg_base = dd->ipath_kregbase;
-
-	if (count == 0)
-		ret = 0;
-	else if ((count % 4) || (*off % 4))
-		/* address or length is not 32-bit aligned, hence invalid */
-		ret = -EINVAL;
-	else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
-		 ipath_diag_inuse == -2)  /* read qw off 0, write qw off 0 */
-		ret = -EINVAL;  /* before any other write allowed */
-	else if ((count % 8) || (*off % 8))
-		/* address or length not 64-bit aligned; do 32-bit writes */
-		ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
-	else
-		ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
-
-	if (ret >= 0) {
-		*off += count;
-		ret = count;
-		if (ipath_diag_inuse == -1)
-			ipath_diag_inuse = 1; /* all read/write OK now */
-	}
-
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_dma.c b/drivers/staging/rdma/ipath/ipath_dma.c
deleted file mode 100644
index 123a8c0..0000000
--- a/drivers/staging/rdma/ipath/ipath_dma.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/scatterlist.h>
-#include <linux/gfp.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_verbs.h"
-
-#define BAD_DMA_ADDRESS ((u64) 0)
-
-/*
- * The following functions implement driver specific replacements
- * for the ib_dma_*() functions.
- *
- * These functions return kernel virtual addresses instead of
- * device bus addresses since the driver uses the CPU to copy
- * data instead of using hardware DMA.
- */
-
-static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-	return dma_addr == BAD_DMA_ADDRESS;
-}
-
-static u64 ipath_dma_map_single(struct ib_device *dev,
-			        void *cpu_addr, size_t size,
-			        enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return (u64) cpu_addr;
-}
-
-static void ipath_dma_unmap_single(struct ib_device *dev,
-				   u64 addr, size_t size,
-				   enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-static u64 ipath_dma_map_page(struct ib_device *dev,
-			      struct page *page,
-			      unsigned long offset,
-			      size_t size,
-			      enum dma_data_direction direction)
-{
-	u64 addr;
-
-	BUG_ON(!valid_dma_direction(direction));
-
-	if (offset + size > PAGE_SIZE) {
-		addr = BAD_DMA_ADDRESS;
-		goto done;
-	}
-
-	addr = (u64) page_address(page);
-	if (addr)
-		addr += offset;
-	/* TODO: handle highmem pages */
-
-done:
-	return addr;
-}
-
-static void ipath_dma_unmap_page(struct ib_device *dev,
-				 u64 addr, size_t size,
-				 enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
-			int nents, enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	u64 addr;
-	int i;
-	int ret = nents;
-
-	BUG_ON(!valid_dma_direction(direction));
-
-	for_each_sg(sgl, sg, nents, i) {
-		addr = (u64) page_address(sg_page(sg));
-		/* TODO: handle highmem pages */
-		if (!addr) {
-			ret = 0;
-			break;
-		}
-		sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-		sg->dma_length = sg->length;
-#endif
-	}
-	return ret;
-}
-
-static void ipath_unmap_sg(struct ib_device *dev,
-			   struct scatterlist *sg, int nents,
-			   enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-static void ipath_sync_single_for_cpu(struct ib_device *dev,
-				      u64 addr,
-				      size_t size,
-				      enum dma_data_direction dir)
-{
-}
-
-static void ipath_sync_single_for_device(struct ib_device *dev,
-					 u64 addr,
-					 size_t size,
-					 enum dma_data_direction dir)
-{
-}
-
-static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
-				      u64 *dma_handle, gfp_t flag)
-{
-	struct page *p;
-	void *addr = NULL;
-
-	p = alloc_pages(flag, get_order(size));
-	if (p)
-		addr = page_address(p);
-	if (dma_handle)
-		*dma_handle = (u64) addr;
-	return addr;
-}
-
-static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
-				    void *cpu_addr, u64 dma_handle)
-{
-	free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
-	.mapping_error = ipath_mapping_error,
-	.map_single = ipath_dma_map_single,
-	.unmap_single = ipath_dma_unmap_single,
-	.map_page = ipath_dma_map_page,
-	.unmap_page = ipath_dma_unmap_page,
-	.map_sg = ipath_map_sg,
-	.unmap_sg = ipath_unmap_sg,
-	.sync_single_for_cpu = ipath_sync_single_for_cpu,
-	.sync_single_for_device = ipath_sync_single_for_device,
-	.alloc_coherent = ipath_dma_alloc_coherent,
-	.free_coherent = ipath_dma_free_coherent
-};
diff --git a/drivers/staging/rdma/ipath/ipath_driver.c b/drivers/staging/rdma/ipath/ipath_driver.c
deleted file mode 100644
index 2ab22f9..0000000
--- a/drivers/staging/rdma/ipath/ipath_driver.c
+++ /dev/null
@@ -1,2784 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#ifdef CONFIG_X86_64
-#include <asm/pat.h>
-#endif
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-
-static void ipath_update_pio_bufs(struct ipath_devdata *);
-
-const char *ipath_get_unit_name(int unit)
-{
-	static char iname[16];
-	snprintf(iname, sizeof iname, "infinipath%u", unit);
-	return iname;
-}
-
-#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
-#define PFX IPATH_DRV_NAME ": "
-
-/*
- * The size has to be longer than this string, so we can append
- * board/chip information to it in the init code.
- */
-const char ib_ipath_version[] = IPATH_IDSTR "\n";
-
-static struct idr unit_table;
-DEFINE_SPINLOCK(ipath_devs_lock);
-LIST_HEAD(ipath_dev_list);
-
-wait_queue_head_t ipath_state_wait;
-
-unsigned ipath_debug = __IPATH_INFO;
-
-module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "mask for debug prints");
-EXPORT_SYMBOL_GPL(ipath_debug);
-
-unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
-module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
-MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
-
-static unsigned ipath_hol_timeout_ms = 13000;
-module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
-MODULE_PARM_DESC(hol_timeout_ms,
-	"duration of user app suspension after link failure");
-
-unsigned ipath_linkrecovery = 1;
-module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
-
-/*
- * Table to translate the LINKTRAININGSTATE portion of
- * IBCStatus to a human-readable form.
- */
-const char *ipath_ibcstatus_str[] = {
-	"Disabled",
-	"LinkUp",
-	"PollActive",
-	"PollQuiet",
-	"SleepDelay",
-	"SleepQuiet",
-	"LState6",		/* unused */
-	"LState7",		/* unused */
-	"CfgDebounce",
-	"CfgRcvfCfg",
-	"CfgWaitRmt",
-	"CfgIdle",
-	"RecovRetrain",
-	"CfgTxRevLane",		/* unused before IBA7220 */
-	"RecovWaitRmt",
-	"RecovIdle",
-	/* below were added for IBA7220 */
-	"CfgEnhanced",
-	"CfgTest",
-	"CfgWaitRmtTest",
-	"CfgWaitCfgEnhanced",
-	"SendTS_T",
-	"SendTstIdles",
-	"RcvTS_T",
-	"SendTst_TS1s",
-	"LTState18", "LTState19", "LTState1A", "LTState1B",
-	"LTState1C", "LTState1D", "LTState1E", "LTState1F"
-};
-
-static void ipath_remove_one(struct pci_dev *);
-static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);
-
-/* Only needed for registration, nothing else needs this info */
-#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-
-/* Number of seconds before our card status check...  */
-#define STATUS_TIMEOUT 60
-
-static const struct pci_device_id ipath_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
-	{ 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
-
-static struct pci_driver ipath_driver = {
-	.name = IPATH_DRV_NAME,
-	.probe = ipath_init_one,
-	.remove = ipath_remove_one,
-	.id_table = ipath_pci_tbl,
-	.driver = {
-		.groups = ipath_driver_attr_groups,
-	},
-};
-
-static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
-			     u32 *bar0, u32 *bar1)
-{
-	int ret;
-
-	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
-	if (ret)
-		ipath_dev_err(dd, "failed to read bar0 before enable: "
-			      "error %d\n", -ret);
-
-	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
-	if (ret)
-		ipath_dev_err(dd, "failed to read bar1 before enable: "
-			      "error %d\n", -ret);
-
-	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
-}
-
-static void ipath_free_devdata(struct pci_dev *pdev,
-			       struct ipath_devdata *dd)
-{
-	unsigned long flags;
-
-	pci_set_drvdata(pdev, NULL);
-
-	if (dd->ipath_unit != -1) {
-		spin_lock_irqsave(&ipath_devs_lock, flags);
-		idr_remove(&unit_table, dd->ipath_unit);
-		list_del(&dd->ipath_list);
-		spin_unlock_irqrestore(&ipath_devs_lock, flags);
-	}
-	vfree(dd);
-}
-
-static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
-{
-	unsigned long flags;
-	struct ipath_devdata *dd;
-	int ret;
-
-	dd = vzalloc(sizeof(*dd));
-	if (!dd) {
-		dd = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-	dd->ipath_unit = -1;
-
-	idr_preload(GFP_KERNEL);
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
-	if (ret < 0) {
-		printk(KERN_ERR IPATH_DRV_NAME
-		       ": Could not allocate unit ID: error %d\n", -ret);
-		ipath_free_devdata(pdev, dd);
-		dd = ERR_PTR(ret);
-		goto bail_unlock;
-	}
-	dd->ipath_unit = ret;
-
-	dd->pcidev = pdev;
-	pci_set_drvdata(pdev, dd);
-
-	list_add(&dd->ipath_list, &ipath_dev_list);
-
-bail_unlock:
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-	idr_preload_end();
-bail:
-	return dd;
-}
-
-static inline struct ipath_devdata *__ipath_lookup(int unit)
-{
-	return idr_find(&unit_table, unit);
-}
-
-struct ipath_devdata *ipath_lookup(int unit)
-{
-	struct ipath_devdata *dd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-	dd = __ipath_lookup(unit);
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-	return dd;
-}
-
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
-{
-	int nunits, npresent, nup;
-	struct ipath_devdata *dd;
-	unsigned long flags;
-	int maxports;
-
-	nunits = npresent = nup = maxports = 0;
-
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-		nunits++;
-		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
-			npresent++;
-		if (dd->ipath_lid &&
-		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
-					 | IPATH_LINKUNK)))
-			nup++;
-		if (dd->ipath_cfgports > maxports)
-			maxports = dd->ipath_cfgports;
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-	if (npresentp)
-		*npresentp = npresent;
-	if (nupp)
-		*nupp = nup;
-	if (maxportsp)
-		*maxportsp = maxports;
-
-	return nunits;
-}
-
-/*
- * These next two routines are placeholders in case we don't have per-arch
- * code for controlling write combining.  If explicit control of write
- * combining is not available, performance will probably be awful.
- */
-
-int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
-{
-	return -EOPNOTSUPP;
-}
-
-void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
-{
-}
-
-/*
- * Perform a PIO buffer bandwidth write test, to verify proper system
- * configuration.  Even when all the setup calls work, occasionally
- * BIOS or other issues can prevent write combining from working, or
- * can cause other bandwidth problems to the chip.
- *
- * This test simply writes the same buffer over and over again, and
- * measures close to the peak bandwidth to the chip (not testing
- * data bandwidth to the wire).   On chips that use an address-based
- * trigger to send packets to the wire, this is easy.  On chips that
- * use a count to trigger, we want to make sure that the packet doesn't
- * go out on the wire, or trigger flow control checks.
- */
-static void ipath_verify_pioperf(struct ipath_devdata *dd)
-{
-	u32 pbnum, cnt, lcnt;
-	u32 __iomem *piobuf;
-	u32 *addr;
-	u64 msecs, emsecs;
-
-	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
-	if (!piobuf) {
-		dev_info(&dd->pcidev->dev,
-			"No PIObufs for checking perf, skipping\n");
-		return;
-	}
-
-	/*
-	 * Enough to give us a reasonable test, less than piobuf size, and
-	 * likely multiple of store buffer length.
-	 */
-	cnt = 1024;
-
-	addr = vmalloc(cnt);
-	if (!addr) {
-		dev_info(&dd->pcidev->dev,
-			"Couldn't get memory for checking PIO perf,"
-			" skipping\n");
-		goto done;
-	}
-
-	preempt_disable();  /* we want reasonably accurate elapsed time */
-	msecs = 1 + jiffies_to_msecs(jiffies);
-	for (lcnt = 0; lcnt < 10000U; lcnt++) {
-		/* wait until we cross msec boundary */
-		if (jiffies_to_msecs(jiffies) >= msecs)
-			break;
-		udelay(1);
-	}
-
-	ipath_disable_armlaunch(dd);
-
-	/*
-	 * length 0, no dwords actually sent, and mark as VL15
-	 * on chips where that may matter (due to IB flowcontrol)
-	 */
-	if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
-		writeq(1UL << 63, piobuf);
-	else
-		writeq(0, piobuf);
-	ipath_flush_wc();
-
-	/*
-	 * this is only roughly accurate, since even with preempt we
-	 * still take interrupts that could take a while.   Running for
-	 * >= 5 msec seems to get us "close enough" to accurate values
-	 */
-	msecs = jiffies_to_msecs(jiffies);
-	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
-		__iowrite32_copy(piobuf + 64, addr, cnt >> 2);
-		emsecs = jiffies_to_msecs(jiffies) - msecs;
-	}
-
-	/* 1 GiB/sec, slightly over IB SDR line rate */
-	if (lcnt < (emsecs * 1024U))
-		ipath_dev_err(dd,
-			"Performance problem: bandwidth to PIO buffers is "
-			"only %u MiB/sec\n",
-			lcnt / (u32) emsecs);
-	else
-		ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
-			lcnt / (u32) emsecs);
-
-	preempt_enable();
-
-	vfree(addr);
-
-done:
-	/* disarm piobuf, so it's available again */
-	ipath_disarm_piobufs(dd, pbnum, 1);
-	ipath_enable_armlaunch(dd);
-}
-
-static void cleanup_device(struct ipath_devdata *dd);
-
-static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	int ret, len, j;
-	struct ipath_devdata *dd;
-	unsigned long long addr;
-	u32 bar0 = 0, bar1 = 0;
-
-#ifdef CONFIG_X86_64
-	if (pat_enabled()) {
-		pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
-		ret = -ENODEV;
-		goto bail;
-	}
-#endif
-
-	dd = ipath_alloc_devdata(pdev);
-	if (IS_ERR(dd)) {
-		ret = PTR_ERR(dd);
-		printk(KERN_ERR IPATH_DRV_NAME
-		       ": Could not allocate devdata: error %d\n", -ret);
-		goto bail;
-	}
-
-	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
-
-	ret = pci_enable_device(pdev);
-	if (ret) {
-		/* This can happen iff:
-		 *
-		 * We did a chip reset, and then failed to reprogram the
-		 * BAR, or the chip reset due to an internal error.  We then
-		 * unloaded the driver and reloaded it.
-		 *
-		 * Both reset cases set the BAR back to initial state.  For
-		 * the latter case, the AER sticky error bit at offset 0x718
-		 * should be set, but the Linux kernel doesn't yet know
-		 * about that, it appears.  If the original BAR was retained
-		 * in the kernel data structures, this may be OK.
-		 */
-		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
-			      dd->ipath_unit, -ret);
-		goto bail_devdata;
-	}
-	addr = pci_resource_start(pdev, 0);
-	len = pci_resource_len(pdev, 0);
-	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
-		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
-		   ent->device, ent->driver_data);
-
-	read_bars(dd, pdev, &bar0, &bar1);
-
-	if (!bar1 && !(bar0 & ~0xf)) {
-		if (addr) {
-			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
-				 "rewriting as %llx\n", addr);
-			ret = pci_write_config_dword(
-				pdev, PCI_BASE_ADDRESS_0, addr);
-			if (ret) {
-				ipath_dev_err(dd, "rewrite of BAR0 "
-					      "failed: err %d\n", -ret);
-				goto bail_disable;
-			}
-			ret = pci_write_config_dword(
-				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
-			if (ret) {
-				ipath_dev_err(dd, "rewrite of BAR1 "
-					      "failed: err %d\n", -ret);
-				goto bail_disable;
-			}
-		} else {
-			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
-				      "not usable until reboot\n");
-			ret = -ENODEV;
-			goto bail_disable;
-		}
-	}
-
-	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
-	if (ret) {
-		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
-			 "err %d\n", dd->ipath_unit, -ret);
-		goto bail_disable;
-	}
-
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (ret) {
-		/*
-		 * if the 64 bit setup fails, try 32 bit.  Some systems
-		 * do not setup 64 bit maps on systems with 2GB or less
-		 * memory installed.
-		 */
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (ret) {
-			dev_info(&pdev->dev,
-				"Unable to set DMA mask for unit %u: %d\n",
-				dd->ipath_unit, ret);
-			goto bail_regions;
-		} else {
-			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
-			ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-			if (ret)
-				dev_info(&pdev->dev,
-					"Unable to set DMA consistent mask "
-					"for unit %u: %d\n",
-					dd->ipath_unit, ret);
-
-		}
-	} else {
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (ret)
-			dev_info(&pdev->dev,
-				"Unable to set DMA consistent mask "
-				"for unit %u: %d\n",
-				dd->ipath_unit, ret);
-	}
-
-	pci_set_master(pdev);
-
-	/*
-	 * Save BARs to rewrite after device reset.  Save all 64 bits of
-	 * BAR, just in case.
-	 */
-	dd->ipath_pcibar0 = addr;
-	dd->ipath_pcibar1 = addr >> 32;
-	dd->ipath_deviceid = ent->device;	/* save for later use */
-	dd->ipath_vendorid = ent->vendor;
-
-	/* setup the chip-specific functions, as early as possible. */
-	switch (ent->device) {
-	case PCI_DEVICE_ID_INFINIPATH_HT:
-		ipath_init_iba6110_funcs(dd);
-		break;
-
-	default:
-		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
-			      "failing\n", ent->device);
-		return -ENODEV;
-	}
-
-	for (j = 0; j < 6; j++) {
-		if (!pdev->resource[j].start)
-			continue;
-		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
-			   j, &pdev->resource[j],
-			   (unsigned long long)pci_resource_len(pdev, j));
-	}
-
-	if (!addr) {
-		ipath_dev_err(dd, "No valid address in BAR 0!\n");
-		ret = -ENODEV;
-		goto bail_regions;
-	}
-
-	dd->ipath_pcirev = pdev->revision;
-
-#if defined(__powerpc__)
-	/* There isn't a generic way to specify writethrough mappings */
-	dd->ipath_kregbase = __ioremap(addr, len,
-		(_PAGE_NO_CACHE|_PAGE_WRITETHRU));
-#else
-	/* XXX: split this properly to enable on PAT */
-	dd->ipath_kregbase = ioremap_nocache(addr, len);
-#endif
-
-	if (!dd->ipath_kregbase) {
-		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
-			  addr);
-		ret = -ENOMEM;
-		goto bail_iounmap;
-	}
-	dd->ipath_kregend = (u64 __iomem *)
-		((void __iomem *)dd->ipath_kregbase + len);
-	dd->ipath_physaddr = addr;	/* used for io_remap, etc. */
-	/* for user mmap */
-	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
-		   addr, dd->ipath_kregbase);
-
-	if (dd->ipath_f_bus(dd, pdev))
-		ipath_dev_err(dd, "Failed to setup config space; "
-			      "continuing anyway\n");
-
-	/*
-	 * set up our interrupt handler; IRQF_SHARED probably not needed,
-	 * since MSI interrupts shouldn't be shared but won't hurt for now.
-	 * check 0 irq after we return from chip-specific bus setup, since
-	 * that can affect this due to setup
-	 */
-	if (!dd->ipath_irq)
-		ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
-			      "work\n");
-	else {
-		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
-				  IPATH_DRV_NAME, dd);
-		if (ret) {
-			ipath_dev_err(dd, "Couldn't setup irq handler, "
-				      "irq=%d: %d\n", dd->ipath_irq, ret);
-			goto bail_iounmap;
-		}
-	}
-
-	ret = ipath_init_chip(dd, 0);	/* do the chip-specific init */
-	if (ret)
-		goto bail_irqsetup;
-
-	ret = ipath_enable_wc(dd);
-
-	if (ret)
-		ret = 0;
-
-	ipath_verify_pioperf(dd);
-
-	ipath_device_create_group(&pdev->dev, dd);
-	ipathfs_add_device(dd);
-	ipath_user_add(dd);
-	ipath_diag_add(dd);
-	ipath_register_ib_device(dd);
-
-	goto bail;
-
-bail_irqsetup:
-	cleanup_device(dd);
-
-	if (dd->ipath_irq)
-		dd->ipath_f_free_irq(dd);
-
-	if (dd->ipath_f_cleanup)
-		dd->ipath_f_cleanup(dd);
-
-bail_iounmap:
-	iounmap((volatile void __iomem *) dd->ipath_kregbase);
-
-bail_regions:
-	pci_release_regions(pdev);
-
-bail_disable:
-	pci_disable_device(pdev);
-
-bail_devdata:
-	ipath_free_devdata(pdev, dd);
-
-bail:
-	return ret;
-}
-
-static void cleanup_device(struct ipath_devdata *dd)
-{
-	int port;
-	struct ipath_portdata **tmp;
-	unsigned long flags;
-
-	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
-		/* can't do anything more with chip; needs re-init */
-		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
-		if (dd->ipath_kregbase) {
-			/*
-			 * if we haven't already cleaned up before, these are
-			 * to ensure any register reads/writes "fail" until
-			 * re-init
-			 */
-			dd->ipath_kregbase = NULL;
-			dd->ipath_uregbase = 0;
-			dd->ipath_sregbase = 0;
-			dd->ipath_cregbase = 0;
-			dd->ipath_kregsize = 0;
-		}
-		ipath_disable_wc(dd);
-	}
-
-	if (dd->ipath_spectriggerhit)
-		dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
-			 dd->ipath_spectriggerhit);
-
-	if (dd->ipath_pioavailregs_dma) {
-		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-				  (void *) dd->ipath_pioavailregs_dma,
-				  dd->ipath_pioavailregs_phys);
-		dd->ipath_pioavailregs_dma = NULL;
-	}
-	if (dd->ipath_dummy_hdrq) {
-		dma_free_coherent(&dd->pcidev->dev,
-			dd->ipath_pd[0]->port_rcvhdrq_size,
-			dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
-		dd->ipath_dummy_hdrq = NULL;
-	}
-
-	if (dd->ipath_pageshadow) {
-		struct page **tmpp = dd->ipath_pageshadow;
-		dma_addr_t *tmpd = dd->ipath_physshadow;
-		int i, cnt = 0;
-
-		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
-			   "locked\n");
-		for (port = 0; port < dd->ipath_cfgports; port++) {
-			int port_tidbase = port * dd->ipath_rcvtidcnt;
-			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-			for (i = port_tidbase; i < maxtid; i++) {
-				if (!tmpp[i])
-					continue;
-				pci_unmap_page(dd->pcidev, tmpd[i],
-					PAGE_SIZE, PCI_DMA_FROMDEVICE);
-				ipath_release_user_pages(&tmpp[i], 1);
-				tmpp[i] = NULL;
-				cnt++;
-			}
-		}
-		if (cnt) {
-			ipath_stats.sps_pageunlocks += cnt;
-			ipath_cdbg(VERBOSE, "There were still %u expTID "
-				   "entries locked\n", cnt);
-		}
-		if (ipath_stats.sps_pagelocks ||
-		    ipath_stats.sps_pageunlocks)
-			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
-				   "unlocked via ipath_m{un}lock\n",
-				   (unsigned long long)
-				   ipath_stats.sps_pagelocks,
-				   (unsigned long long)
-				   ipath_stats.sps_pageunlocks);
-
-		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
-			   dd->ipath_pageshadow);
-		tmpp = dd->ipath_pageshadow;
-		dd->ipath_pageshadow = NULL;
-		vfree(tmpp);
-
-		dd->ipath_egrtidbase = NULL;
-	}
-
-	/*
-	 * free any resources still in use (usually just kernel ports)
-	 * at unload; we do for portcnt, because that's what we allocate.
-	 * We acquire lock to be really paranoid that ipath_pd isn't being
-	 * accessed from some interrupt-related code (that should not happen,
-	 * but best to be sure).
-	 */
-	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-	tmp = dd->ipath_pd;
-	dd->ipath_pd = NULL;
-	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-	for (port = 0; port < dd->ipath_portcnt; port++) {
-		struct ipath_portdata *pd = tmp[port];
-		tmp[port] = NULL; /* debugging paranoia */
-		ipath_free_pddata(dd, pd);
-	}
-	kfree(tmp);
-}
-
-static void ipath_remove_one(struct pci_dev *pdev)
-{
-	struct ipath_devdata *dd = pci_get_drvdata(pdev);
-
-	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
-
-	/*
-	 * disable the IB link early, to be sure no new packets arrive, which
-	 * complicates the shutdown process
-	 */
-	ipath_shutdown_device(dd);
-
-	flush_workqueue(ib_wq);
-
-	if (dd->verbs_dev)
-		ipath_unregister_ib_device(dd->verbs_dev);
-
-	ipath_diag_remove(dd);
-	ipath_user_remove(dd);
-	ipathfs_remove_device(dd);
-	ipath_device_remove_group(&pdev->dev, dd);
-
-	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
-		   "unit %u\n", dd, (u32) dd->ipath_unit);
-
-	cleanup_device(dd);
-
-	/*
-	 * turn off rcv, send, and interrupts for all ports, all drivers
-	 * should also hard reset the chip here?
-	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
-	 * for all versions of the driver, if they were allocated
-	 */
-	if (dd->ipath_irq) {
-		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
-			   dd->ipath_unit, dd->ipath_irq);
-		dd->ipath_f_free_irq(dd);
-	} else
-		ipath_dbg("irq is 0, not doing free_irq "
-			  "for unit %u\n", dd->ipath_unit);
-	/*
-	 * we check for NULL here, because it's outside
-	 * the kregbase check, and we need to call it
-	 * after the free_irq.	Thus it's possible that
-	 * the function pointers were never initialized.
-	 */
-	if (dd->ipath_f_cleanup)
-		/* clean up chip-specific stuff */
-		dd->ipath_f_cleanup(dd);
-
-	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
-	iounmap((volatile void __iomem *) dd->ipath_kregbase);
-	pci_release_regions(pdev);
-	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
-	pci_disable_device(pdev);
-
-	ipath_free_devdata(pdev, dd);
-}
-
-/* general driver use */
-DEFINE_MUTEX(ipath_mutex);
-
-static DEFINE_SPINLOCK(ipath_pioavail_lock);
-
-/**
- * ipath_disarm_piobufs - cancel a range of PIO buffers
- * @dd: the infinipath device
- * @first: the first PIO buffer to cancel
- * @cnt: the number of PIO buffers to cancel
- *
- * cancel a range of PIO buffers, used when they might be armed, but
- * not triggered.  Used at init to ensure buffer state, and also user
- * process close, in case it died while writing to a PIO buffer
- * Also after errors.
- */
-void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
-			  unsigned cnt)
-{
-	unsigned i, last = first + cnt;
-	unsigned long flags;
-
-	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
-	for (i = first; i < last; i++) {
-		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-		/*
-		 * The disarm-related bits are write-only, so it
-		 * is ok to OR them in with our copy of sendctrl
-		 * while we hold the lock.
-		 */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
-			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
-		/* can't disarm bufs back-to-back per iba7220 spec */
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-	}
-	/* on some older chips, update may not happen after cancel */
-	ipath_force_pio_avail_update(dd);
-}
-
-/**
- * ipath_wait_linkstate - wait for an IB link state change to occur
- * @dd: the infinipath device
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * wait up to msecs milliseconds for IB link state change to occur.
- * For now, take the easy polling route.  Currently used only by
- * ipath_set_linkstate.  Returns 0 if the state is reached, otherwise
- * -ETIMEDOUT.  The state argument can have multiple state bits set,
- * to wait for any of several transitions.
- */
-int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
-{
-	dd->ipath_state_wanted = state;
-	wait_event_interruptible_timeout(ipath_state_wait,
-					 (dd->ipath_flags & state),
-					 msecs_to_jiffies(msecs));
-	dd->ipath_state_wanted = 0;
-
-	if (!(dd->ipath_flags & state)) {
-		u64 val;
-		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
-			   " ms\n",
-			   /* test INIT ahead of DOWN, both can be set */
-			   (state & IPATH_LINKINIT) ? "INIT" :
-			   ((state & IPATH_LINKDOWN) ? "DOWN" :
-			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
-			   msecs);
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
-			   (unsigned long long) ipath_read_kreg64(
-				   dd, dd->ipath_kregs->kr_ibcctrl),
-			   (unsigned long long) val,
-			   ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
-	}
-	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
-}
-
-static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
-	char *buf, size_t blen)
-{
-	static const struct {
-		ipath_err_t err;
-		const char *msg;
-	} errs[] = {
-		{ INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
-		{ INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
-		{ INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
-		{ INFINIPATH_E_SDMABASE, "SDmaBase" },
-		{ INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
-		{ INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
-		{ INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
-		{ INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
-		{ INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
-		{ INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
-		{ INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
-		{ INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
-	};
-	int i;
-	int expected;
-	size_t bidx = 0;
-
-	for (i = 0; i < ARRAY_SIZE(errs); i++) {
-		expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
-			test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-		if ((err & errs[i].err) && !expected)
-			bidx += snprintf(buf + bidx, blen - bidx,
-					 "%s ", errs[i].msg);
-	}
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else.  Return 1 if "real" errors, otherwise 0 if only packet
- * errors, so caller can decide what to print with the string.
- */
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
-	ipath_err_t err)
-{
-	int iserr = 1;
-	*buf = '\0';
-	if (err & INFINIPATH_E_PKTERRS) {
-		if (!(err & ~INFINIPATH_E_PKTERRS))
-			iserr = 0; // if only packet errors.
-		if (ipath_debug & __IPATH_ERRPKTDBG) {
-			if (err & INFINIPATH_E_REBP)
-				strlcat(buf, "EBP ", blen);
-			if (err & INFINIPATH_E_RVCRC)
-				strlcat(buf, "VCRC ", blen);
-			if (err & INFINIPATH_E_RICRC) {
-				strlcat(buf, "CRC ", blen);
-				// clear for check below, so only once
-				err &= INFINIPATH_E_RICRC;
-			}
-			if (err & INFINIPATH_E_RSHORTPKTLEN)
-				strlcat(buf, "rshortpktlen ", blen);
-			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
-				strlcat(buf, "sdroppeddatapkt ", blen);
-			if (err & INFINIPATH_E_SPKTLEN)
-				strlcat(buf, "spktlen ", blen);
-		}
-		if ((err & INFINIPATH_E_RICRC) &&
-			!(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
-			strlcat(buf, "CRC ", blen);
-		if (!iserr)
-			goto done;
-	}
-	if (err & INFINIPATH_E_RHDRLEN)
-		strlcat(buf, "rhdrlen ", blen);
-	if (err & INFINIPATH_E_RBADTID)
-		strlcat(buf, "rbadtid ", blen);
-	if (err & INFINIPATH_E_RBADVERSION)
-		strlcat(buf, "rbadversion ", blen);
-	if (err & INFINIPATH_E_RHDR)
-		strlcat(buf, "rhdr ", blen);
-	if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
-		strlcat(buf, "sendspecialtrigger ", blen);
-	if (err & INFINIPATH_E_RLONGPKTLEN)
-		strlcat(buf, "rlongpktlen ", blen);
-	if (err & INFINIPATH_E_RMAXPKTLEN)
-		strlcat(buf, "rmaxpktlen ", blen);
-	if (err & INFINIPATH_E_RMINPKTLEN)
-		strlcat(buf, "rminpktlen ", blen);
-	if (err & INFINIPATH_E_SMINPKTLEN)
-		strlcat(buf, "sminpktlen ", blen);
-	if (err & INFINIPATH_E_RFORMATERR)
-		strlcat(buf, "rformaterr ", blen);
-	if (err & INFINIPATH_E_RUNSUPVL)
-		strlcat(buf, "runsupvl ", blen);
-	if (err & INFINIPATH_E_RUNEXPCHAR)
-		strlcat(buf, "runexpchar ", blen);
-	if (err & INFINIPATH_E_RIBFLOW)
-		strlcat(buf, "ribflow ", blen);
-	if (err & INFINIPATH_E_SUNDERRUN)
-		strlcat(buf, "sunderrun ", blen);
-	if (err & INFINIPATH_E_SPIOARMLAUNCH)
-		strlcat(buf, "spioarmlaunch ", blen);
-	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
-		strlcat(buf, "sunexperrpktnum ", blen);
-	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
-		strlcat(buf, "sdroppedsmppkt ", blen);
-	if (err & INFINIPATH_E_SMAXPKTLEN)
-		strlcat(buf, "smaxpktlen ", blen);
-	if (err & INFINIPATH_E_SUNSUPVL)
-		strlcat(buf, "sunsupVL ", blen);
-	if (err & INFINIPATH_E_INVALIDADDR)
-		strlcat(buf, "invalidaddr ", blen);
-	if (err & INFINIPATH_E_RRCVEGRFULL)
-		strlcat(buf, "rcvegrfull ", blen);
-	if (err & INFINIPATH_E_RRCVHDRFULL)
-		strlcat(buf, "rcvhdrfull ", blen);
-	if (err & INFINIPATH_E_IBSTATUSCHANGED)
-		strlcat(buf, "ibcstatuschg ", blen);
-	if (err & INFINIPATH_E_RIBLOSTLINK)
-		strlcat(buf, "riblostlink ", blen);
-	if (err & INFINIPATH_E_HARDWARE)
-		strlcat(buf, "hardware ", blen);
-	if (err & INFINIPATH_E_RESET)
-		strlcat(buf, "reset ", blen);
-	if (err & INFINIPATH_E_SDMAERRS)
-		decode_sdma_errs(dd, err, buf, blen);
-	if (err & INFINIPATH_E_INVALIDEEPCMD)
-		strlcat(buf, "invalideepromcmd ", blen);
-done:
-	return iserr;
-}
-
-/**
- * get_rhf_errstring - decode RHF errors
- * @err: the err number
- * @msg: the output buffer
- * @len: the length of the output buffer
- *
- * only used in one place now; may want more later
- */
-static void get_rhf_errstring(u32 err, char *msg, size_t len)
-{
-	/* start empty, so the no-error case and the first strlcat both work */
-	*msg = '\0';
-
-	if (err & INFINIPATH_RHF_H_ICRCERR)
-		strlcat(msg, "icrcerr ", len);
-	if (err & INFINIPATH_RHF_H_VCRCERR)
-		strlcat(msg, "vcrcerr ", len);
-	if (err & INFINIPATH_RHF_H_PARITYERR)
-		strlcat(msg, "parityerr ", len);
-	if (err & INFINIPATH_RHF_H_LENERR)
-		strlcat(msg, "lenerr ", len);
-	if (err & INFINIPATH_RHF_H_MTUERR)
-		strlcat(msg, "mtuerr ", len);
-	if (err & INFINIPATH_RHF_H_IHDRERR)
-		/* infinipath hdr checksum error */
-		strlcat(msg, "ipathhdrerr ", len);
-	if (err & INFINIPATH_RHF_H_TIDERR)
-		strlcat(msg, "tiderr ", len);
-	if (err & INFINIPATH_RHF_H_MKERR)
-		/* bad port, offset, etc. */
-		strlcat(msg, "invalid ipathhdr ", len);
-	if (err & INFINIPATH_RHF_H_IBERR)
-		strlcat(msg, "iberr ", len);
-	if (err & INFINIPATH_RHF_L_SWA)
-		strlcat(msg, "swA ", len);
-	if (err & INFINIPATH_RHF_L_SWB)
-		strlcat(msg, "swB ", len);
-}
-
-/**
- * ipath_get_egrbuf - get an eager buffer
- * @dd: the infinipath device
- * @bufnum: the eager buffer to get
- *
- * must only be called if ipath_pd[port] is known to be allocated
- */
-static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
-{
-	return dd->ipath_port0_skbinfo ?
-		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
-}
-
-/**
- * ipath_alloc_skb - allocate an skb and buffer with possible constraints
- * @dd: the infinipath device
- * @gfp_mask: the sk_buff GFP mask
- */
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
-				gfp_t gfp_mask)
-{
-	struct sk_buff *skb;
-	u32 len;
-
-	/*
-	 * The only fully supported way to handle this is to allocate lots of
-	 * extra space, align as needed, and then do skb_reserve().  That wastes
-	 * a lot of memory...  I'll have to hack this into infinipath_copy
-	 * also.
-	 */
-
-	/*
-	 * We need 2 extra bytes for ipath_ether data sent in the
-	 * key header.  In order to keep everything dword aligned,
-	 * we'll reserve 4 bytes.
-	 */
-	len = dd->ipath_ibmaxlen + 4;
-
-	if (dd->ipath_flags & IPATH_4BYTE_TID) {
-		/* We need a 2KB multiple alignment, and there is no way
-		 * to do it except to allocate extra and then skb_reserve
-		 * enough to bring it up to the right alignment.
-		 */
-		len += 2047;
-	}
-
-	skb = __dev_alloc_skb(len, gfp_mask);
-	if (!skb) {
-		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
-			      len);
-		goto bail;
-	}
-
-	skb_reserve(skb, 4);
-
-	if (dd->ipath_flags & IPATH_4BYTE_TID) {
-		u32 una = (unsigned long)skb->data & 2047;
-		if (una)
-			skb_reserve(skb, 2048 - una);
-	}
-
-bail:
-	return skb;
-}
-
-static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
-			     u32 eflags,
-			     u32 l,
-			     u32 etail,
-			     __le32 *rhf_addr,
-			     struct ipath_message_header *hdr)
-{
-	char emsg[128];
-
-	get_rhf_errstring(eflags, emsg, sizeof emsg);
-	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
-		   "tlen=%x opcode=%x egridx=%x: %s\n",
-		   eflags, l,
-		   ipath_hdrget_rcv_type(rhf_addr),
-		   ipath_hdrget_length_in_bytes(rhf_addr),
-		   be32_to_cpu(hdr->bth[0]) >> 24,
-		   etail, emsg);
-
-	/* Count local link integrity errors. */
-	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
-		u8 n = (dd->ipath_ibcctrl >>
-			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-
-		if (++dd->ipath_lli_counter > n) {
-			dd->ipath_lli_counter = 0;
-			dd->ipath_lli_errors++;
-		}
-	}
-}
-
-/*
- * ipath_kreceive - receive a packet
- * @pd: the infinipath port
- *
- * called from interrupt handler for errors or receive interrupt
- */
-void ipath_kreceive(struct ipath_portdata *pd)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	__le32 *rhf_addr;
-	void *ebuf;
-	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
-	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
-	u32 etail = -1, l, hdrqtail;
-	struct ipath_message_header *hdr;
-	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
-	static u64 totcalls;	/* stats, may eventually remove */
-	int last;
-
-	l = pd->port_head;
-	rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
-	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-		u32 seq = ipath_hdrget_seq(rhf_addr);
-
-		if (seq != pd->port_seq_cnt)
-			goto bail;
-		hdrqtail = 0;
-	} else {
-		hdrqtail = ipath_get_rcvhdrtail(pd);
-		if (l == hdrqtail)
-			goto bail;
-		smp_rmb();
-	}
-
-reloop:
-	for (last = 0, i = 1; !last; i += !last) {
-		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
-		eflags = ipath_hdrget_err_flags(rhf_addr);
-		etype = ipath_hdrget_rcv_type(rhf_addr);
-		/* total length */
-		tlen = ipath_hdrget_length_in_bytes(rhf_addr);
-		ebuf = NULL;
-		if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
-		    ipath_hdrget_use_egr_buf(rhf_addr) :
-		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
-			/*
-			 * It turns out that the chip uses an eager buffer
-			 * for all non-expected packets, whether it "needs"
-			 * one or not.  So always get the index, but don't
-			 * set ebuf (so we try to copy data) unless the
-			 * length requires it.
-			 */
-			etail = ipath_hdrget_index(rhf_addr);
-			updegr = 1;
-			if (tlen > sizeof(*hdr) ||
-			    etype == RCVHQ_RCV_TYPE_NON_KD)
-				ebuf = ipath_get_egrbuf(dd, etail);
-		}
-
-		/*
-		 * both tiderr and ipathhdrerr are set for all plain IB
-		 * packets; only ipathhdrerr should be set.
-		 */
-
-		if (etype != RCVHQ_RCV_TYPE_NON_KD &&
-		    etype != RCVHQ_RCV_TYPE_ERROR &&
-		    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
-		    IPS_PROTO_VERSION)
-			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
-				   "%x\n", etype);
-
-		if (unlikely(eflags))
-			ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
-		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
-			ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
-			if (dd->ipath_lli_counter)
-				dd->ipath_lli_counter--;
-		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
-			u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
-			u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
-			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
-				   "qp=%x), len %x; ignored\n",
-				   etype, opcode, qp, tlen);
-		} else if (etype == RCVHQ_RCV_TYPE_EXPECTED) {
-			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
-				  be32_to_cpu(hdr->bth[0]) >> 24);
-		} else {
-			/*
-			 * error packet, type of error unknown.
-			 * Probably type 3, but we don't know, so don't
-			 * even try to print the opcode, etc.
-			 * Usually caused by a "bad packet" that has no
-			 * BTH, even though the LRH says it should.
-			 */
-			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
-				  " %x, len %x hdrq+%x rhf: %Lx\n",
-				  etail, tlen, l, (unsigned long long)
-				  le64_to_cpu(*(__le64 *) rhf_addr));
-			if (ipath_debug & __IPATH_ERRPKTDBG) {
-				u32 j, *d, dw = rsize-2;
-				if (rsize > (tlen>>2))
-					dw = tlen>>2;
-				d = (u32 *)hdr;
-				printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
-					dw);
-				for (j = 0; j < dw; j++)
-					printk(KERN_DEBUG "%8x%s", d[j],
-						(j%8) == 7 ? "\n" : " ");
-				printk(KERN_DEBUG ".\n");
-			}
-		}
-		l += rsize;
-		if (l >= maxcnt)
-			l = 0;
-		rhf_addr = (__le32 *) pd->port_rcvhdrq +
-			l + dd->ipath_rhf_offset;
-		if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-			u32 seq = ipath_hdrget_seq(rhf_addr);
-
-			if (++pd->port_seq_cnt > 13)
-				pd->port_seq_cnt = 1;
-			if (seq != pd->port_seq_cnt)
-				last = 1;
-		} else if (l == hdrqtail) {
-			last = 1;
-		}
-		/*
-		 * update head regs on last packet, and every 16 packets.
-		 * Reduce bus traffic, while still trying to prevent
-		 * rcvhdrq overflows, for when the queue is nearly full
-		 */
-		if (last || !(i & 0xf)) {
-			u64 lval = l;
-
-			/* request IBA6120 and 7220 interrupt only on last */
-			if (last)
-				lval |= dd->ipath_rhdrhead_intr_off;
-			ipath_write_ureg(dd, ur_rcvhdrhead, lval,
-				pd->port_port);
-			if (updegr) {
-				ipath_write_ureg(dd, ur_rcvegrindexhead,
-						 etail, pd->port_port);
-				updegr = 0;
-			}
-		}
-	}
-
-	if (!dd->ipath_rhdrhead_intr_off && !reloop &&
-	    !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-		/* IBA6110 workaround; we can have a race clearing chip
-		 * interrupt with another interrupt about to be delivered,
-		 * and can clear it before it is delivered on the GPIO
-		 * workaround.  By doing the extra check here for the
-		 * in-memory tail register updating while we were doing
-		 * earlier packets, we "almost" guarantee we have covered
-		 * that case.
-		 */
-		u32 hqtail = ipath_get_rcvhdrtail(pd);
-		if (hqtail != hdrqtail) {
-			hdrqtail = hqtail;
-			reloop = 1; /* loop 1 extra time at most */
-			goto reloop;
-		}
-	}
-
-	pkttot += i;
-
-	pd->port_head = l;
-
-	if (pkttot > ipath_stats.sps_maxpkts_call)
-		ipath_stats.sps_maxpkts_call = pkttot;
-	ipath_stats.sps_port0pkts += pkttot;
-	ipath_stats.sps_avgpkts_call =
-		ipath_stats.sps_port0pkts / ++totcalls;
-
-bail:;
-}
-
-/**
- * ipath_update_pio_bufs - update shadow copy of the PIO availability map
- * @dd: the infinipath device
- *
- * called whenever our local copy indicates we have run out of send buffers
- * NOTE: This can be called from interrupt context by some code
- * and from non-interrupt context by ipath_getpiobuf().
- */
-
-static void ipath_update_pio_bufs(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-	int i;
-	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
-
-	/* If the generation (check) bits have changed, then we update the
-	 * busy bit for the corresponding PIO buffer.  This algorithm will
-	 * modify positions to the value they already have in some cases
-	 * (i.e., no change), but it's faster than changing only the bits
-	 * that have changed.
-	 *
-	 * We would like to do this atomically, to avoid spinlocks in the
-	 * critical send path, but that's not really possible, given the
-	 * type of changes, and that this routine could be called on
-	 * multiple CPUs simultaneously, so we lock in this routine only,
-	 * to avoid conflicting updates; all we change is the shadow, and
-	 * it's a single 64 bit memory location, so by definition the update
-	 * is atomic in terms of what other CPUs can see when testing the
-	 * bits.  The spin_lock overhead isn't too bad, since it only
-	 * happens when all buffers are in use, so only cpu overhead, not
-	 * latency or bandwidth is affected.
-	 */
-	if (!dd->ipath_pioavailregs_dma) {
-		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
-		return;
-	}
-	if (ipath_debug & __IPATH_VERBDBG) {
-		/* only if packet debug and verbose */
-		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
-		unsigned long *shadow = dd->ipath_pioavailshadow;
-
-		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
-			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
-			   "s3=%lx\n",
-			   (unsigned long long) le64_to_cpu(dma[0]),
-			   shadow[0],
-			   (unsigned long long) le64_to_cpu(dma[1]),
-			   shadow[1],
-			   (unsigned long long) le64_to_cpu(dma[2]),
-			   shadow[2],
-			   (unsigned long long) le64_to_cpu(dma[3]),
-			   shadow[3]);
-		if (piobregs > 4)
-			ipath_cdbg(
-				PKT, "2nd group, dma4=%llx shad4=%lx, "
-				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
-				"d7=%llx s7=%lx\n",
-				(unsigned long long) le64_to_cpu(dma[4]),
-				shadow[4],
-				(unsigned long long) le64_to_cpu(dma[5]),
-				shadow[5],
-				(unsigned long long) le64_to_cpu(dma[6]),
-				shadow[6],
-				(unsigned long long) le64_to_cpu(dma[7]),
-				shadow[7]);
-	}
-	spin_lock_irqsave(&ipath_pioavail_lock, flags);
-	for (i = 0; i < piobregs; i++) {
-		u64 pchbusy, pchg, piov, pnew;
-		/*
-		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
-		 */
-		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
-		else
-			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
-		pchg = dd->ipath_pioavailkernel[i] &
-			~(dd->ipath_pioavailshadow[i] ^ piov);
-		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
-		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
-			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
-			pnew |= piov & pchbusy;
-			dd->ipath_pioavailshadow[i] = pnew;
-		}
-	}
-	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
-
-/*
- * used to force update of pioavailshadow if we can't get a pio buffer.
- * Needed primarily due to exiting freeze mode after recovering
- * from errors.  Done lazily, because it's safer (known to not
- * be writing pio buffers).
- */
-static void ipath_reset_availshadow(struct ipath_devdata *dd)
-{
-	int i, im;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ipath_pioavail_lock, flags);
-	for (i = 0; i < dd->ipath_pioavregs; i++) {
-		u64 val, oldval;
-		/* deal with 6110 chip bug on high register #s */
-		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
-			i ^ 1 : i;
-		val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
-		/*
-		 * busy out the buffers not in the kernel avail list,
-		 * without changing the generation bits.
-		 */
-		oldval = dd->ipath_pioavailshadow[i];
-		dd->ipath_pioavailshadow[i] = val |
-			((~dd->ipath_pioavailkernel[i] <<
-			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
-			0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
-		if (oldval != dd->ipath_pioavailshadow[i])
-			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
-				i, (unsigned long long) oldval,
-				dd->ipath_pioavailshadow[i]);
-	}
-	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
-
-/**
- * ipath_setrcvhdrsize - set the receive header size
- * @dd: the infinipath device
- * @rhdrsize: the receive header size
- *
- * called from user init code, and also layered driver init
- */
-int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
-{
-	int ret = 0;
-
-	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
-		if (dd->ipath_rcvhdrsize != rhdrsize) {
-			dev_info(&dd->pcidev->dev,
-				 "Error: can't set protocol header "
-				 "size %u, already %u\n",
-				 rhdrsize, dd->ipath_rcvhdrsize);
-			ret = -EAGAIN;
-		} else
-			ipath_cdbg(VERBOSE, "Reuse same protocol header "
-				   "size %u\n", dd->ipath_rcvhdrsize);
-	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
-			       (sizeof(u64) / sizeof(u32)))) {
-		ipath_dbg("Error: can't set protocol header size %u "
-			  "(> max %u)\n", rhdrsize,
-			  dd->ipath_rcvhdrentsize -
-			  (u32) (sizeof(u64) / sizeof(u32)));
-		ret = -EOVERFLOW;
-	} else {
-		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
-		dd->ipath_rcvhdrsize = rhdrsize;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
-				 dd->ipath_rcvhdrsize);
-		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
-			   dd->ipath_rcvhdrsize);
-	}
-	return ret;
-}
-
-/*
- * debugging code and stats updates if no pio buffers available.
- */
-static noinline void no_pio_bufs(struct ipath_devdata *dd)
-{
-	unsigned long *shadow = dd->ipath_pioavailshadow;
-	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
-
-	dd->ipath_upd_pio_shadow = 1;
-
-	/*
-	 * not atomic, but if we lose a stat count once in a while, that's OK
-	 */
-	ipath_stats.sps_nopiobufs++;
-	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
-		ipath_force_pio_avail_update(dd); /* at start */
-		ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
-			"%llx %llx %llx %llx\n"
-			"ipath  shadow:  %lx %lx %lx %lx\n",
-			dd->ipath_consec_nopiobuf,
-			(unsigned long)get_cycles(),
-			(unsigned long long) le64_to_cpu(dma[0]),
-			(unsigned long long) le64_to_cpu(dma[1]),
-			(unsigned long long) le64_to_cpu(dma[2]),
-			(unsigned long long) le64_to_cpu(dma[3]),
-			shadow[0], shadow[1], shadow[2], shadow[3]);
-		/*
-		 * 4 buffers per byte, 4 registers above, cover rest
-		 * below
-		 */
-		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
-		    (sizeof(shadow[0]) * 4 * 4))
-			ipath_dbg("2nd group: dmacopy: "
-				  "%llx %llx %llx %llx\n"
-				  "ipath  shadow:  %lx %lx %lx %lx\n",
-				  (unsigned long long)le64_to_cpu(dma[4]),
-				  (unsigned long long)le64_to_cpu(dma[5]),
-				  (unsigned long long)le64_to_cpu(dma[6]),
-				  (unsigned long long)le64_to_cpu(dma[7]),
-				  shadow[4], shadow[5], shadow[6], shadow[7]);
-
-		/* at end, so update likely happened */
-		ipath_reset_availshadow(dd);
-	}
-}
-
-/*
- * common code for normal driver pio buffer allocation, and reserved
- * allocation.
- *
- * do appropriate marking as busy, etc.
- * returns a pointer to the buffer if one is found (and sets *pbufnum), else NULL.
- */
-static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
-	u32 *pbufnum, u32 first, u32 last, u32 firsti)
-{
-	int i, j, updated = 0;
-	unsigned piobcnt;
-	unsigned long flags;
-	unsigned long *shadow = dd->ipath_pioavailshadow;
-	u32 __iomem *buf;
-
-	piobcnt = last - first;
-	if (dd->ipath_upd_pio_shadow) {
-		/*
-		 * Minor optimization.  If we had no buffers on last call,
-		 * start out by doing the update; continue and do the scan even
-		 * if no buffers were updated, to be paranoid.
-		 */
-		ipath_update_pio_bufs(dd);
-		updated++;
-		i = first;
-	} else
-		i = firsti;
-rescan:
-	/*
-	 * while test_and_set_bit() is atomic, we do that and then the
-	 * change_bit(), and the pair is not.  See if this is the cause
-	 * of the remaining armlaunch errors.
-	 */
-	spin_lock_irqsave(&ipath_pioavail_lock, flags);
-	for (j = 0; j < piobcnt; j++, i++) {
-		if (i >= last)
-			i = first;
-		if (__test_and_set_bit((2 * i) + 1, shadow))
-			continue;
-		/* flip generation bit */
-		__change_bit(2 * i, shadow);
-		break;
-	}
-	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
-	if (j == piobcnt) {
-		if (!updated) {
-			/*
-			 * first time through; shadow exhausted, but there may be
-			 * buffers available, so try an update and then rescan.
-			 */
-			ipath_update_pio_bufs(dd);
-			updated++;
-			i = first;
-			goto rescan;
-		} else if (updated == 1 && piobcnt <=
-			((dd->ipath_sendctrl
-			>> INFINIPATH_S_UPDTHRESH_SHIFT) &
-			INFINIPATH_S_UPDTHRESH_MASK)) {
-			/*
-			 * for chips supporting and using the update
-			 * threshold we need to force an update of the
-			 * in-memory copy if the count is less than the
-			 * threshold, then check one more time.
-			 */
-			ipath_force_pio_avail_update(dd);
-			ipath_update_pio_bufs(dd);
-			updated++;
-			i = first;
-			goto rescan;
-		}
-
-		no_pio_bufs(dd);
-		buf = NULL;
-	} else {
-		if (i < dd->ipath_piobcnt2k)
-			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
-					       i * dd->ipath_palign);
-		else
-			buf = (u32 __iomem *)
-				(dd->ipath_pio4kbase +
-				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
-		if (pbufnum)
-			*pbufnum = i;
-	}
-
-	return buf;
-}
-
-/**
- * ipath_getpiobuf - find an available pio buffer
- * @dd: the infinipath device
- * @plen: the size of the PIO buffer needed in 32-bit words
- * @pbufnum: the buffer number is placed here
- */
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
-{
-	u32 __iomem *buf;
-	u32 pnum, nbufs;
-	u32 first, lasti;
-
-	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
-		first = dd->ipath_piobcnt2k;
-		lasti = dd->ipath_lastpioindexl;
-	} else {
-		first = 0;
-		lasti = dd->ipath_lastpioindex;
-	}
-	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
-
-	if (buf) {
-		/*
-		 * Set next starting place.  It's just an optimization,
-		 * it doesn't matter who wins on this, so no locking
-		 */
-		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
-			dd->ipath_lastpioindexl = pnum + 1;
-		else
-			dd->ipath_lastpioindex = pnum + 1;
-		if (dd->ipath_upd_pio_shadow)
-			dd->ipath_upd_pio_shadow = 0;
-		if (dd->ipath_consec_nopiobuf)
-			dd->ipath_consec_nopiobuf = 0;
-		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
-			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
-		if (pbufnum)
-			*pbufnum = pnum;
-
-	}
-	return buf;
-}
-
-/**
- * ipath_chg_pioavailkernel - change which send buffers are available for kernel
- * @dd: the infinipath device
- * @start: the starting send buffer number
- * @len: the number of send buffers
- * @avail: true if the buffers are available for kernel use, false otherwise
- */
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
-			      unsigned len, int avail)
-{
-	unsigned long flags;
-	unsigned end, cnt = 0;
-
-	/* There are two bits per send buffer (busy and generation) */
-	start *= 2;
-	end = start + len * 2;
-
-	spin_lock_irqsave(&ipath_pioavail_lock, flags);
-	/* Set or clear the busy bit in the shadow. */
-	while (start < end) {
-		if (avail) {
-			unsigned long dma;
-			int i, im;
-			/*
-			 * the BUSY bit will never be set, because we disarm
-			 * the user buffers before we hand them back to the
-			 * kernel.  We do have to make sure the generation
-			 * bit is set correctly in shadow, since it could
-			 * have changed many times while allocated to user.
-			 * We can't use the bitmap functions on the full
-			 * dma array because it is always little-endian, so
-			 * we have to flip to host-order first.
-			 * BITS_PER_LONG is slightly wrong, since it's
-			 * always 64 bits per register in chip...
-			 * We only work on 64 bit kernels, so that's OK.
-			 */
-			/* deal with 6110 chip bug on high register #s */
-			i = start / BITS_PER_LONG;
-			im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
-				i ^ 1 : i;
-			__clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
-				+ start, dd->ipath_pioavailshadow);
-			dma = (unsigned long) le64_to_cpu(
-				dd->ipath_pioavailregs_dma[im]);
-			if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-				+ start) % BITS_PER_LONG, &dma))
-				__set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-					+ start, dd->ipath_pioavailshadow);
-			else
-				__clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-					+ start, dd->ipath_pioavailshadow);
-			__set_bit(start, dd->ipath_pioavailkernel);
-		} else {
-			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
-				dd->ipath_pioavailshadow);
-			__clear_bit(start, dd->ipath_pioavailkernel);
-		}
-		start += 2;
-	}
-
-	if (dd->ipath_pioupd_thresh) {
-		end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
-		cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
-	}
-	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
-	/*
-	 * When moving buffers from kernel to user, if number assigned to
-	 * the user is less than the pio update threshold, and threshold
-	 * is supported (cnt was computed > 0), drop the update threshold
-	 * so we update at least once per allocated number of buffers.
-	 * In any case, if the kernel buffers are less than the threshold,
-	 * drop the threshold.  We don't bother increasing it, having once
-	 * decreased it, since it would typically just cycle back and forth.
-	 * If we don't decrease it below the buffers in use, we can wait a
-	 * long time for an update, until some other context uses PIO buffers.
-	 */
-	if (!avail && len < cnt)
-		cnt = len;
-	if (cnt < dd->ipath_pioupd_thresh) {
-		dd->ipath_pioupd_thresh = cnt;
-		ipath_dbg("Decreased pio update threshold to %u\n",
-			dd->ipath_pioupd_thresh);
-		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
-			<< INFINIPATH_S_UPDTHRESH_SHIFT);
-		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
-			<< INFINIPATH_S_UPDTHRESH_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			dd->ipath_sendctrl);
-		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-	}
-}
-
-/**
- * ipath_create_rcvhdrq - create a receive header queue
- * @dd: the infinipath device
- * @pd: the port data
- *
- * this must be contiguous memory (from an i/o perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int ipath_create_rcvhdrq(struct ipath_devdata *dd,
-			 struct ipath_portdata *pd)
-{
-	int ret = 0;
-
-	if (!pd->port_rcvhdrq) {
-		dma_addr_t phys_hdrqtail;
-		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-				sizeof(u32), PAGE_SIZE);
-
-		pd->port_rcvhdrq = dma_alloc_coherent(
-			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
-			gfp_flags);
-
-		if (!pd->port_rcvhdrq) {
-			ipath_dev_err(dd, "attempt to allocate %d bytes "
-				      "for port %u rcvhdrq failed\n",
-				      amt, pd->port_port);
-			ret = -ENOMEM;
-			goto bail;
-		}
-
-		if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-			pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
-				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
-				GFP_KERNEL);
-			if (!pd->port_rcvhdrtail_kvaddr) {
-				ipath_dev_err(dd, "attempt to allocate 1 page "
-					"for port %u rcvhdrqtailaddr "
-					"failed\n", pd->port_port);
-				ret = -ENOMEM;
-				dma_free_coherent(&dd->pcidev->dev, amt,
-					pd->port_rcvhdrq,
-					pd->port_rcvhdrq_phys);
-				pd->port_rcvhdrq = NULL;
-				goto bail;
-			}
-			pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
-			ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
-				   "physical\n", pd->port_port,
-				   (unsigned long long) phys_hdrqtail);
-		}
-
-		pd->port_rcvhdrq_size = amt;
-
-		ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
-			   "for port %u rcvhdr Q\n",
-			   amt >> PAGE_SHIFT, pd->port_rcvhdrq,
-			   (unsigned long) pd->port_rcvhdrq_phys,
-			   (unsigned long) pd->port_rcvhdrq_size,
-			   pd->port_port);
-	} else {
-		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
-			   "hdrtailaddr@%p %llx physical\n",
-			   pd->port_port, pd->port_rcvhdrq,
-			   (unsigned long long) pd->port_rcvhdrq_phys,
-			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
-			   pd->port_rcvhdrqtailaddr_phys);
-	}
-	/* clear for security and sanity on each use */
-	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
-	if (pd->port_rcvhdrtail_kvaddr)
-		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
-
-	/*
-	 * tell chip each time we init it, even if we are re-using previous
-	 * memory (we zero the register at process close)
-	 */
-	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
-	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
-			      pd->port_port, pd->port_rcvhdrq_phys);
-
-bail:
-	return ret;
-}
-
-
-/*
- * Flush all sends that might be in the ready to send state, as well as any
- * that are in the process of being sent.   Used whenever we need to be
- * sure the send side is idle.  Cleans up all buffer state by canceling
- * all pio buffers, and issuing an abort, which cleans up anything in the
- * launch fifo.  The cancel is superfluous on some chip versions, but
- * it's safer to always do it.
- * PIOAvail bits are updated by the chip as if normal send had happened.
- */
-void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
-{
-	unsigned long flags;
-
-	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
-		ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
-		goto bail;
-	}
-	/*
-	 * If we have SDMA, and it's not disabled, we have to kick off the
-	 * abort state machine, provided we aren't already aborting.
-	 * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
-	 * we skip the rest of this routine. It is already "in progress"
-	 */
-	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
-		int skip_cancel;
-		unsigned long *statp = &dd->ipath_sdma_status;
-
-		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-		skip_cancel =
-			test_and_set_bit(IPATH_SDMA_ABORTING, statp)
-			&& !test_bit(IPATH_SDMA_DISABLED, statp);
-		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-		if (skip_cancel)
-			goto bail;
-	}
-
-	ipath_dbg("Cancelling all in-progress send buffers\n");
-
-	/* skip armlaunch errs for a while */
-	dd->ipath_lastcancel = jiffies + HZ / 2;
-
-	/*
-	 * The abort bit is auto-clearing.  We also don't want pioavail
-	 * update happening during this, and we don't want any other
-	 * sends going out, so turn those off for the duration.  We read
-	 * the scratch register to be sure that cancels and the abort
-	 * have taken effect in the chip.  Otherwise the two parts are the same
-	 * as in ipath_force_pio_avail_update().
-	 */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
-		| INFINIPATH_S_PIOENABLE);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		dd->ipath_sendctrl | INFINIPATH_S_ABORT);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-	/* disarm all send buffers */
-	ipath_disarm_piobufs(dd, 0,
-		dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
-
-	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-		set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-
-	if (restore_sendctrl) {
-		/* else done by caller later if needed */
-		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-		dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
-			INFINIPATH_S_PIOENABLE;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			dd->ipath_sendctrl);
-		/* and again, be sure all have hit the chip */
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-	}
-
-	if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
-	    !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
-	    test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
-		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-		/* only wait so long for intr */
-		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
-		dd->ipath_sdma_reset_wait = 200;
-		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-	}
-bail:;
-}
-
-/*
- * Force an update of in-memory copy of the pioavail registers, when
- * needed for any of a variety of reasons.  We read the scratch register
- * to make it highly likely that the update will have happened by the
- * time we return.  If already off (as in cancel_sends above), this
- * routine is a nop, on the assumption that the caller will "do the
- * right thing".
- */
-void ipath_force_pio_avail_update(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			dd->ipath_sendctrl);
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	}
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
-				int linitcmd)
-{
-	u64 mod_wd;
-	static const char *what[4] = {
-		[0] = "NOP",
-		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
-		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
-		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
-	};
-
-	if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
-		/*
-		 * If we are told to disable, note that so link-recovery
-		 * code does not attempt to bring us back up.
-		 */
-		preempt_disable();
-		dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
-		preempt_enable();
-	} else if (linitcmd) {
-		/*
-		 * Any other linkinitcmd will lead to LINKDOWN and then
-		 * to INIT (if all is well), so clear flag to let
-		 * link-recovery code attempt to bring us back up.
-		 */
-		preempt_disable();
-		dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
-		preempt_enable();
-	}
-
-	mod_wd = (linkcmd << dd->ibcc_lc_shift) |
-		(linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-	ipath_cdbg(VERBOSE,
-		"Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
-		dd->ipath_unit, what[linkcmd], linitcmd,
-		ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
-			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-			 dd->ipath_ibcctrl | mod_wd);
-	/* read from chip so write is flushed */
-	(void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-}
-
-int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
-{
-	u32 lstate;
-	int ret;
-
-	switch (newstate) {
-	case IPATH_IB_LINKDOWN_ONLY:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-					INFINIPATH_IBCC_LINKINITCMD_POLL);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_SLEEP:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-					INFINIPATH_IBCC_LINKINITCMD_SLEEP);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_DISABLE:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-					INFINIPATH_IBCC_LINKINITCMD_DISABLE);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKARM:
-		if (dd->ipath_flags & IPATH_LINKARMED) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags &
-		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
-
-		/*
-		 * Since the port can transition to ACTIVE by receiving
-		 * a non VL 15 packet, wait for either state.
-		 */
-		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
-		break;
-
-	case IPATH_IB_LINKACTIVE:
-		if (dd->ipath_flags & IPATH_LINKACTIVE) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
-		lstate = IPATH_LINKACTIVE;
-		break;
-
-	case IPATH_IB_LINK_LOOPBACK:
-		dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
-		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-
-		/* turn heartbeat off, as it causes loopback to fail */
-		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-				       IPATH_IB_HRTBT_OFF);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINK_EXTERNAL:
-		dev_info(&dd->pcidev->dev,
-			"Disabling IB local loopback (normal)\n");
-		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-				       IPATH_IB_HRTBT_ON);
-		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	/*
-	 * Heartbeat can be explicitly enabled by the user via
-	 * "hrtbt_enable" "file", and if disabled, trying to enable here
-	 * will have no effect.  Implicit changes (heartbeat off when
-	 * loopback on, and vice versa) are included to ease testing.
-	 */
-	case IPATH_IB_LINK_HRTBT:
-		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-			IPATH_IB_HRTBT_ON);
-		goto bail;
-
-	case IPATH_IB_LINK_NO_HRTBT:
-		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-			IPATH_IB_HRTBT_OFF);
-		goto bail;
-
-	default:
-		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
-		ret = -EINVAL;
-		goto bail;
-	}
-	ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * we can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size.   For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link INIT state...
- */
-int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
-	u32 piosize;
-	int changed = 0;
-	int ret;
-
-	/*
-	 * mtu is IB data payload max.  It's the largest power of 2 less
-	 * than piosize (or even larger, since it only really controls the
-	 * largest we can receive; we can send the max of the mtu and
-	 * piosize).  We check that it's one of the valid IB sizes.
-	 */
-	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
-	    (arg != 4096 || !ipath_mtu4096)) {
-		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
-		ret = -EINVAL;
-		goto bail;
-	}
-	if (dd->ipath_ibmtu == arg) {
-		ret = 0;        /* same as current */
-		goto bail;
-	}
-
-	piosize = dd->ipath_ibmaxlen;
-	dd->ipath_ibmtu = arg;
-
-	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
-		/* Only if it's not the initial value (or reset to it) */
-		if (piosize != dd->ipath_init_ibmaxlen) {
-			if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
-				piosize = dd->ipath_init_ibmaxlen;
-			dd->ipath_ibmaxlen = piosize;
-			changed = 1;
-		}
-	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
-		piosize = arg + IPATH_PIO_MAXIBHDR;
-		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
-			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
-			   arg);
-		dd->ipath_ibmaxlen = piosize;
-		changed = 1;
-	}
-
-	if (changed) {
-		u64 ibc = dd->ipath_ibcctrl, ibdw;
-		/*
-		 * update our housekeeping variables, and set IBC max
-		 * size, same as init code; max IBC is max we allow in
-		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
-		 */
-		dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
-		ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
-		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
-			 dd->ibcc_mpl_shift);
-		ibc |= ibdw << dd->ibcc_mpl_shift;
-		dd->ipath_ibcctrl = ibc;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-		dd->ipath_f_tidtemplate(dd);
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
-{
-	dd->ipath_lid = lid;
-	dd->ipath_lmc = lmc;
-
-	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
-		(~((1U << lmc) - 1)) << 16);
-
-	dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
-
-	return 0;
-}
-
-
-/**
- * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
- * @dd: the infinipath device
- * @regno: the register number to write
- * @port: the port containing the register
- * @value: the value to write
- *
- * Registers that have per-port instances (selected by the port number)
- * use this routine.
- */
-void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
-			  unsigned port, u64 value)
-{
-	u16 where;
-
-	if (port < dd->ipath_portcnt &&
-	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
-	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
-		where = regno + port;
-	else
-		where = -1;
-
-	ipath_write_kreg(dd, where, value);
-}
-
-/*
- * The following deals with the "obviously simple" task of overriding the
- * state of the LEDs, which normally indicate link physical and logical status.
- * The complications arise in dealing with different hardware mappings
- * and the board-dependent routine being called from interrupts,
- * and then there's the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
-
-static void ipath_run_led_override(unsigned long opaque)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-	int timeoff;
-	int pidx;
-	u64 lstate, ltstate, val;
-
-	if (!(dd->ipath_flags & IPATH_INITTED))
-		return;
-
-	pidx = dd->ipath_led_override_phase++ & 1;
-	dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
-	timeoff = dd->ipath_led_override_timeoff;
-
-	/*
-	 * The code below potentially restores the LED values per the current
-	 * status; it should perhaps also set up the traffic-blink register,
-	 * but that is left to the per-chip functions.
-	 */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	ltstate = ipath_ib_linktrstate(dd, val);
-	lstate = ipath_ib_linkstate(dd, val);
-
-	dd->ipath_f_setextled(dd, lstate, ltstate);
-	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
-}
-
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
-{
-	int timeoff, freq;
-
-	if (!(dd->ipath_flags & IPATH_INITTED))
-		return;
-
-	/* First check if we are blinking. If not, use 1 Hz polling */
-	timeoff = HZ;
-	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
-
-	if (freq) {
-		/* For blink, set each phase from one nybble of val */
-		dd->ipath_led_override_vals[0] = val & 0xF;
-		dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
-		timeoff = (HZ << 4)/freq;
-	} else {
-		/* Non-blink set both phases the same. */
-		dd->ipath_led_override_vals[0] = val & 0xF;
-		dd->ipath_led_override_vals[1] = val & 0xF;
-	}
-	dd->ipath_led_override_timeoff = timeoff;
-
-	/*
-	 * If the timer has not already been started, do so. Use a "quick"
-	 * timeout so the function will be called soon, to look at our request.
-	 */
-	if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
-		/* Need to start timer */
-		setup_timer(&dd->ipath_led_override_timer,
-				ipath_run_led_override, (unsigned long)dd);
-
-		dd->ipath_led_override_timer.expires = jiffies + 1;
-		add_timer(&dd->ipath_led_override_timer);
-	} else
-		atomic_dec(&dd->ipath_led_override_timer_active);
-}
-
-/**
- * ipath_shutdown_device - shut down a device
- * @dd: the infinipath device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled.   It does not free any data structures.
- * Everything it does has to be set up again by ipath_init_chip(dd, 1).
- */
-void ipath_shutdown_device(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-
-	ipath_dbg("Shutting down the device\n");
-
-	ipath_hol_up(dd); /* make sure user processes aren't suspended */
-
-	dd->ipath_flags |= IPATH_LINKUNK;
-	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
-			     IPATH_LINKINIT | IPATH_LINKARMED |
-			     IPATH_LINKACTIVE);
-	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
-				IPATH_STATUS_IB_READY);
-
-	/* mask interrupts, but not errors */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
-	dd->ipath_rcvctrl = 0;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-			 dd->ipath_rcvctrl);
-
-	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-		teardown_sdma(dd);
-
-	/*
-	 * gracefully stop all sends, allowing any in progress to trickle out
-	 * first.
-	 */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl = 0;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	/* flush it */
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-	/*
-	 * Wait long enough for anything that's going to trickle out to have
-	 * actually done so.
-	 */
-	udelay(5);
-
-	dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
-
-	ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
-	ipath_cancel_sends(dd, 0);
-
-	/*
-	 * we are shutting down, so tell components that care.  We don't do
-	 * this on just a link state change; much as with ethernet, a cable
-	 * unplug, etc. doesn't change driver state.
-	 */
-	signal_ib_event(dd, IB_EVENT_PORT_ERR);
-
-	/* disable IBC */
-	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);
-
-	/*
-	 * clear SerdesEnable and turn the LEDs off; do this here because
-	 * we are unloading, so we can't count on interrupts to move things
-	 * along.  Turn the LEDs off explicitly for the same reason.
-	 */
-	dd->ipath_f_quiet_serdes(dd);
-
-	/* stop all the timers that might still be running */
-	del_timer_sync(&dd->ipath_hol_timer);
-	if (dd->ipath_stats_timer_active) {
-		del_timer_sync(&dd->ipath_stats_timer);
-		dd->ipath_stats_timer_active = 0;
-	}
-	if (dd->ipath_intrchk_timer.data) {
-		del_timer_sync(&dd->ipath_intrchk_timer);
-		dd->ipath_intrchk_timer.data = 0;
-	}
-	if (atomic_read(&dd->ipath_led_override_timer_active)) {
-		del_timer_sync(&dd->ipath_led_override_timer);
-		atomic_set(&dd->ipath_led_override_timer_active, 0);
-	}
-
-	/*
-	 * clear all interrupts and errors, so that the next time the driver
-	 * is loaded or device is enabled, we know that whatever is set
-	 * happened while we were unloaded
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
-	ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
-	ipath_update_eeprom_log(dd);
-}
-
-/**
- * ipath_free_pddata - free a port's allocated data
- * @dd: the infinipath device
- * @pd: the portdata structure
- *
- * free up any allocated data for a port
- * This should not touch anything that would affect a simultaneous
- * re-allocation of port data, because it is called after ipath_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- * (The only exception to global state is freeing the port0 port0_skbs.)
- */
-void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
-{
-	if (!pd)
-		return;
-
-	if (pd->port_rcvhdrq) {
-		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
-			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
-			   (unsigned long) pd->port_rcvhdrq_size);
-		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
-				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
-		pd->port_rcvhdrq = NULL;
-		if (pd->port_rcvhdrtail_kvaddr) {
-			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-					 pd->port_rcvhdrtail_kvaddr,
-					 pd->port_rcvhdrqtailaddr_phys);
-			pd->port_rcvhdrtail_kvaddr = NULL;
-		}
-	}
-	if (pd->port_port && pd->port_rcvegrbuf) {
-		unsigned e;
-
-		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-			void *base = pd->port_rcvegrbuf[e];
-			size_t size = pd->port_rcvegrbuf_size;
-
-			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
-				   "chunk %u/%u\n", base,
-				   (unsigned long) size,
-				   e, pd->port_rcvegrbuf_chunks);
-			dma_free_coherent(&dd->pcidev->dev, size,
-				base, pd->port_rcvegrbuf_phys[e]);
-		}
-		kfree(pd->port_rcvegrbuf);
-		pd->port_rcvegrbuf = NULL;
-		kfree(pd->port_rcvegrbuf_phys);
-		pd->port_rcvegrbuf_phys = NULL;
-		pd->port_rcvegrbuf_chunks = 0;
-	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
-		unsigned e;
-		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
-
-		dd->ipath_port0_skbinfo = NULL;
-		ipath_cdbg(VERBOSE, "free closed port %d "
-			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
-			   skbinfo);
-		for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
-			if (skbinfo[e].skb) {
-				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
-						 dd->ipath_ibmaxlen,
-						 PCI_DMA_FROMDEVICE);
-				dev_kfree_skb(skbinfo[e].skb);
-			}
-		vfree(skbinfo);
-	}
-	kfree(pd->port_tid_pg_list);
-	vfree(pd->subport_uregbase);
-	vfree(pd->subport_rcvegrbuf);
-	vfree(pd->subport_rcvhdr_base);
-	kfree(pd);
-}
-
-static int __init infinipath_init(void)
-{
-	int ret;
-
-	if (ipath_debug & __IPATH_DBG)
-		printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
-
-	/*
-	 * These must be called before the driver is registered with
-	 * the PCI subsystem.
-	 */
-	idr_init(&unit_table);
-
-	ret = pci_register_driver(&ipath_driver);
-	if (ret < 0) {
-		printk(KERN_ERR IPATH_DRV_NAME
-		       ": Unable to register driver: error %d\n", -ret);
-		goto bail_unit;
-	}
-
-	ret = ipath_init_ipathfs();
-	if (ret < 0) {
-		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
-		       "ipathfs: error %d\n", -ret);
-		goto bail_pci;
-	}
-
-	goto bail;
-
-bail_pci:
-	pci_unregister_driver(&ipath_driver);
-
-bail_unit:
-	idr_destroy(&unit_table);
-
-bail:
-	return ret;
-}
-
-static void __exit infinipath_cleanup(void)
-{
-	ipath_exit_ipathfs();
-
-	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
-	pci_unregister_driver(&ipath_driver);
-
-	idr_destroy(&unit_table);
-}
-
-/**
- * ipath_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload).  We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize.  For
- * now, we only allow this if no user ports are open that use chip resources
- */
-int ipath_reset_device(int unit)
-{
-	int ret, i;
-	struct ipath_devdata *dd = ipath_lookup(unit);
-	unsigned long flags;
-
-	if (!dd) {
-		ret = -ENODEV;
-		goto bail;
-	}
-
-	if (atomic_read(&dd->ipath_led_override_timer_active)) {
-		/* Need to stop LED timer, _then_ shut off LEDs */
-		del_timer_sync(&dd->ipath_led_override_timer);
-		atomic_set(&dd->ipath_led_override_timer_active, 0);
-	}
-
-	/* Shut off LEDs after we are sure timer is not running */
-	dd->ipath_led_override = LED_OVER_BOTH_OFF;
-	dd->ipath_f_setextled(dd, 0, 0);
-
-	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
-
-	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
-		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
-			 "not initialized or not present\n", unit);
-		ret = -ENXIO;
-		goto bail;
-	}
-
-	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-	if (dd->ipath_pd)
-		for (i = 1; i < dd->ipath_cfgports; i++) {
-			if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
-				continue;
-			spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-			ipath_dbg("unit %u port %d is in use "
-				  "(PID %u cmd %s), can't reset\n",
-				  unit, i,
-				  pid_nr(dd->ipath_pd[i]->port_pid),
-				  dd->ipath_pd[i]->port_comm);
-			ret = -EBUSY;
-			goto bail;
-		}
-	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
-	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-		teardown_sdma(dd);
-
-	dd->ipath_flags &= ~IPATH_INITTED;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-	ret = dd->ipath_f_reset(dd);
-	if (ret == 1) {
-		ipath_dbg("Reinitializing unit %u after reset attempt\n",
-			  unit);
-		ret = ipath_init_chip(dd, 1);
-	} else
-		ret = -EAGAIN;
-	if (ret)
-		ipath_dev_err(dd, "Reinitialize unit %u after "
-			      "reset failed with %d\n", unit, ret);
-	else
-		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
-			 "resetting\n", unit);
-
-bail:
-	return ret;
-}
-
-/*
- * send a signal to all the processes that have the driver open
- * through the normal interfaces (i.e., everything other than diags
- * interface).  Returns number of signalled processes.
- */
-static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
-{
-	int i, sub, any = 0;
-	struct pid *pid;
-	unsigned long flags;
-
-	if (!dd->ipath_pd)
-		return 0;
-
-	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-	for (i = 1; i < dd->ipath_cfgports; i++) {
-		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
-			continue;
-		pid = dd->ipath_pd[i]->port_pid;
-		if (!pid)
-			continue;
-
-		dev_info(&dd->pcidev->dev, "context %d in use "
-			  "(PID %u), sending signal %d\n",
-			  i, pid_nr(pid), sig);
-		kill_pid(pid, sig, 1);
-		any++;
-		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
-			pid = dd->ipath_pd[i]->port_subpid[sub];
-			if (!pid)
-				continue;
-			dev_info(&dd->pcidev->dev, "sub-context "
-				"%d:%d in use (PID %u), sending "
-				"signal %d\n", i, sub, pid_nr(pid), sig);
-			kill_pid(pid, sig, 1);
-			any++;
-		}
-	}
-	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-	return any;
-}
-
-static void ipath_hol_signal_down(struct ipath_devdata *dd)
-{
-	if (ipath_signal_procs(dd, SIGSTOP))
-		ipath_dbg("Stopped some processes\n");
-	ipath_cancel_sends(dd, 1);
-}
-
-
-static void ipath_hol_signal_up(struct ipath_devdata *dd)
-{
-	if (ipath_signal_procs(dd, SIGCONT))
-		ipath_dbg("Continued some processes\n");
-}
-
-/*
- * link is down, stop any user processes, and flush pending sends
- * to prevent HoL blocking, then start the HoL timer that
- * periodically continues, then stops procs, so they can detect
- * link down if they want, and do something about it.
- * Timer may already be running, so use mod_timer, not add_timer.
- */
-void ipath_hol_down(struct ipath_devdata *dd)
-{
-	dd->ipath_hol_state = IPATH_HOL_DOWN;
-	ipath_hol_signal_down(dd);
-	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
-	dd->ipath_hol_timer.expires = jiffies +
-		msecs_to_jiffies(ipath_hol_timeout_ms);
-	mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
-}
-
-/*
- * link is up, continue any user processes, and ensure timer
- * is a nop, if running.  Let timer keep running, if set; it
- * will nop when it sees the link is up
- */
-void ipath_hol_up(struct ipath_devdata *dd)
-{
-	ipath_hol_signal_up(dd);
-	dd->ipath_hol_state = IPATH_HOL_UP;
-}
-
-/*
- * toggle the running/not running state of user processes
- * to prevent HoL blocking on chip resources, but still allow
- * user processes to do link down special case handling.
- * Should only be called via the timer
- */
-void ipath_hol_event(unsigned long opaque)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-	if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
-		&& dd->ipath_hol_state != IPATH_HOL_UP) {
-		dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
-		ipath_dbg("Stopping processes\n");
-		ipath_hol_signal_down(dd);
-	} else { /* may do "extra" if also in ipath_hol_up() */
-		dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
-		ipath_dbg("Continuing processes\n");
-		ipath_hol_signal_up(dd);
-	}
-	if (dd->ipath_hol_state == IPATH_HOL_UP)
-		ipath_dbg("link's up, don't resched timer\n");
-	else {
-		dd->ipath_hol_timer.expires = jiffies +
-			msecs_to_jiffies(ipath_hol_timeout_ms);
-		mod_timer(&dd->ipath_hol_timer,
-			dd->ipath_hol_timer.expires);
-	}
-}
-
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
-{
-	u64 val;
-
-	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
-		return -1;
-	if (dd->ipath_rx_pol_inv != new_pol_inv) {
-		dd->ipath_rx_pol_inv = new_pol_inv;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-			 INFINIPATH_XGXS_RX_POL_SHIFT);
-		val |= ((u64)dd->ipath_rx_pol_inv) <<
-			INFINIPATH_XGXS_RX_POL_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-	}
-	return 0;
-}
-
-/*
- * Disable and enable the armlaunch error.  Used for PIO bandwidth testing on
- * the 7220, which is count-based, rather than trigger-based.  Safe for the
- * driver check, since it's at init.   Not completely safe when used for
- * user-mode checking, since some error checking can be lost, but not
- * particularly risky, and only has problematic side-effects in the face of
- * very buggy user code.  There is no reference counting, but that's also
- * fine, given the intended use.
- */
-void ipath_enable_armlaunch(struct ipath_devdata *dd)
-{
-	dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-		INFINIPATH_E_SPIOARMLAUNCH);
-	dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-		dd->ipath_errormask);
-}
-
-void ipath_disable_armlaunch(struct ipath_devdata *dd)
-{
-	/* so don't re-enable if already set */
-	dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
-	dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-		dd->ipath_errormask);
-}
-
-module_init(infinipath_init);
-module_exit(infinipath_cleanup);
diff --git a/drivers/staging/rdma/ipath/ipath_eeprom.c b/drivers/staging/rdma/ipath/ipath_eeprom.c
deleted file mode 100644
index ef84107..0000000
--- a/drivers/staging/rdma/ipath/ipath_eeprom.c
+++ /dev/null
@@ -1,1183 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-
-/*
- * InfiniPath I2C driver for a serial eeprom.  This is not a generic
- * I2C interface.  For a start, the device we're using (Atmel AT24C11)
- * doesn't work like a regular I2C device.  It looks like one
- * electrically, but not logically.  Normal I2C devices have a single
- * 7-bit or 10-bit I2C address that they respond to.  Valid 7-bit
- * addresses range from 0x03 to 0x77.  Addresses 0x00 to 0x02 and 0x78
- * to 0x7F are special reserved addresses (e.g. 0x00 is the "general
- * call" address.)  The Atmel device, on the other hand, responds to ALL
- * 7-bit addresses.  It's designed to be the only device on a given I2C
- * bus.  A 7-bit address corresponds to the memory address within the
- * Atmel device itself.
- *
- * Also, the timing requirements mean more than simple software
- * bitbanging, with readbacks from chip to ensure timing (simple udelay
- * is not enough).
- *
- * This all means that accessing the device is specialized enough
- * that using the standard kernel I2C bitbanging interface would be
- * impossible.  For example, the core I2C eeprom driver expects to find
- * a device at one or more of a limited set of addresses only.  It doesn't
- * allow writing to an eeprom.  It also doesn't provide any means of
- * accessing eeprom contents from within the kernel, only via sysfs.
- */
-
-/* Added functionality for IBA7220-based cards */
-#define IPATH_EEPROM_DEV_V1 0xA0
-#define IPATH_EEPROM_DEV_V2 0xA2
-#define IPATH_TEMP_DEV 0x98
-#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
-#define IPATH_NO_DEV (0xFF)
-
-/*
- * The number of I2C chains is proliferating. Table below brings
- * some order to the madness. The basic principle is that the
- * table is scanned from the top, and a "probe" is made to the
- * device probe_dev. If that succeeds, the chain is considered
- * to be of that type, and dd->i2c_chain_type is set to the index+1
- * of the entry.
- * The +1 is so static initialization can mean "unknown, do probe."
- */
-static struct i2c_chain_desc {
-	u8 probe_dev;	/* If seen at probe, chain is this type */
-	u8 eeprom_dev;	/* Dev addr (if any) for EEPROM */
-	u8 temp_dev;	/* Dev Addr (if any) for Temp-sense */
-} i2c_chains[] = {
-	{ IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
-	{ IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
-	{ IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
-	{ IPATH_NO_DEV }
-};
-
-enum i2c_type {
-	i2c_line_scl = 0,
-	i2c_line_sda
-};
-
-enum i2c_state {
-	i2c_line_low = 0,
-	i2c_line_high
-};
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_gpio_set - set a GPIO line
- * @dd: the infinipath device
- * @line: the line to set
- * @new_line_state: the state to set
- *
- * Returns 0 if the line was set to the new state successfully, non-zero
- * on error.
- */
-static int i2c_gpio_set(struct ipath_devdata *dd,
-			enum i2c_type line,
-			enum i2c_state new_line_state)
-{
-	u64 out_mask, dir_mask, *gpioval;
-	unsigned long flags = 0;
-
-	gpioval = &dd->ipath_gpio_out;
-
-	if (line == i2c_line_scl) {
-		dir_mask = dd->ipath_gpio_scl;
-		out_mask = (1UL << dd->ipath_gpio_scl_num);
-	} else {
-		dir_mask = dd->ipath_gpio_sda;
-		out_mask = (1UL << dd->ipath_gpio_sda_num);
-	}
-
-	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-	if (new_line_state == i2c_line_high) {
-		/* tri-state the output rather than force high */
-		dd->ipath_extctrl &= ~dir_mask;
-	} else {
-		/* config line to be an output */
-		dd->ipath_extctrl |= dir_mask;
-	}
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
-
-	/* set output as well (no real verify) */
-	if (new_line_state == i2c_line_high)
-		*gpioval |= out_mask;
-	else
-		*gpioval &= ~out_mask;
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
-	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-	return 0;
-}
-
-/**
- * i2c_gpio_get - get a GPIO line state
- * @dd: the infinipath device
- * @line: the line to get
- * @curr_statep: where to put the line state
- *
- * Returns 0 if the line state was read successfully, non-zero
- * on error.  curr_state is not set on error.
- */
-static int i2c_gpio_get(struct ipath_devdata *dd,
-			enum i2c_type line,
-			enum i2c_state *curr_statep)
-{
-	u64 read_val, mask;
-	int ret;
-	unsigned long flags = 0;
-
-	/* check args */
-	if (curr_statep == NULL) {
-		ret = 1;
-		goto bail;
-	}
-
-	/* config line to be an input */
-	if (line == i2c_line_scl)
-		mask = dd->ipath_gpio_scl;
-	else
-		mask = dd->ipath_gpio_sda;
-
-	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-	dd->ipath_extctrl &= ~mask;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
-	/*
-	 * Below is very unlikely to reflect true input state if Output
-	 * Enable actually changed.
-	 */
-	read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-	if (read_val & mask)
-		*curr_statep = i2c_line_high;
-	else
-		*curr_statep = i2c_line_low;
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the infinipath device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip.  Since we are delaying anyway, the cost doesn't
- * hurt, and makes the bit twiddling more regular
- */
-static void i2c_wait_for_writes(struct ipath_devdata *dd)
-{
-	(void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	rmb();
-}
-
-static void scl_out(struct ipath_devdata *dd, u8 bit)
-{
-	udelay(1);
-	i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
-
-	i2c_wait_for_writes(dd);
-}
-
-static void sda_out(struct ipath_devdata *dd, u8 bit)
-{
-	i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
-
-	i2c_wait_for_writes(dd);
-}
-
-static u8 sda_in(struct ipath_devdata *dd, int wait)
-{
-	enum i2c_state bit;
-
-	if (i2c_gpio_get(dd, i2c_line_sda, &bit))
-		ipath_dbg("get bit failed!\n");
-
-	if (wait)
-		i2c_wait_for_writes(dd);
-
-	return bit == i2c_line_high ? 1U : 0;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the infinipath device
- */
-static int i2c_ackrcv(struct ipath_devdata *dd)
-{
-	u8 ack_received;
-
-	/* AT ENTRY SCL = LOW */
-	/* change direction, ignore data */
-	ack_received = sda_in(dd, 1);
-	scl_out(dd, i2c_line_high);
-	ack_received = sda_in(dd, 1) == 0;
-	scl_out(dd, i2c_line_low);
-	return ack_received;
-}
-
-/**
- * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
- * @dd: the infinipath device
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct ipath_devdata *dd)
-{
-	int bit_cntr, data;
-
-	data = 0;
-
-	for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
-		data <<= 1;
-		scl_out(dd, i2c_line_high);
-		data |= sda_in(dd, 0);
-		scl_out(dd, i2c_line_low);
-	}
-	return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the infinipath device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct ipath_devdata *dd, u8 data)
-{
-	int bit_cntr;
-	u8 bit;
-
-	for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
-		bit = (data >> bit_cntr) & 1;
-		sda_out(dd, bit);
-		scl_out(dd, i2c_line_high);
-		scl_out(dd, i2c_line_low);
-	}
-	return (!i2c_ackrcv(dd)) ? 1 : 0;
-}
-
-static void send_ack(struct ipath_devdata *dd)
-{
-	sda_out(dd, i2c_line_low);
-	scl_out(dd, i2c_line_high);
-	scl_out(dd, i2c_line_low);
-	sda_out(dd, i2c_line_high);
-}
-
-/**
- * i2c_startcmd - transmit the start condition, followed by address/cmd
- * @dd: the infinipath device
- * @offset_dir: direction byte
- *
- *      (both clock/data high, clock high, data low while clock is high)
- */
-static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
-{
-	int res;
-
-	/* issue start sequence */
-	sda_out(dd, i2c_line_high);
-	scl_out(dd, i2c_line_high);
-	sda_out(dd, i2c_line_low);
-	scl_out(dd, i2c_line_low);
-
-	/* issue length and direction byte */
-	res = wr_byte(dd, offset_dir);
-
-	if (res)
-		ipath_cdbg(VERBOSE, "No ack to complete start\n");
-
-	return res;
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the infinipath device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct ipath_devdata *dd)
-{
-	scl_out(dd, i2c_line_low);
-	sda_out(dd, i2c_line_low);
-	scl_out(dd, i2c_line_high);
-	sda_out(dd, i2c_line_high);
-	udelay(2);
-}
-
-/**
- * eeprom_reset - reset I2C communication
- * @dd: the infinipath device
- */
-
-static int eeprom_reset(struct ipath_devdata *dd)
-{
-	int clock_cycles_left = 9;
-	u64 *gpioval = &dd->ipath_gpio_out;
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-	/* Make sure shadows are consistent */
-	dd->ipath_extctrl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
-	*gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
-	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-	ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
-		   "is %llx\n", (unsigned long long) *gpioval);
-
-	/*
-	 * This is to get the i2c into a known state, by first going low,
-	 * then tristate sda (and then tristate scl as first thing
-	 * in loop)
-	 */
-	scl_out(dd, i2c_line_low);
-	sda_out(dd, i2c_line_high);
-
-	/* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
-	while (clock_cycles_left--) {
-		scl_out(dd, i2c_line_high);
-
-		/* SDA seen high, issue START by dropping it while SCL high */
-		if (sda_in(dd, 0)) {
-			sda_out(dd, i2c_line_low);
-			scl_out(dd, i2c_line_low);
-			/* ATMEL spec says must be followed by STOP. */
-			scl_out(dd, i2c_line_high);
-			sda_out(dd, i2c_line_high);
-			ret = 0;
-			goto bail;
-		}
-
-		scl_out(dd, i2c_line_low);
-	}
-
-	ret = 1;
-
-bail:
-	return ret;
-}
-
-/*
- * Probe for I2C device at specified address. Returns 0 for "success"
- * to match rest of this file.
- * Leave bus in "reasonable" state for further commands.
- */
-static int i2c_probe(struct ipath_devdata *dd, int devaddr)
-{
-	int ret;
-
-	ret = eeprom_reset(dd);
-	if (ret) {
-		ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
-			      devaddr);
-		return ret;
-	}
-	/*
-	 * Reset no longer leaves bus in start condition, so normal
-	 * i2c_startcmd() will do.
-	 */
-	ret = i2c_startcmd(dd, devaddr | READ_CMD);
-	if (ret)
-		ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
-			   devaddr);
-	else {
-		/*
-		 * Device did respond. Complete a single-byte read, because some
-		 * devices apparently cannot handle STOP immediately after they
-		 * ACK the start-cmd.
-		 */
-		int data;
-		data = rd_byte(dd);
-		stop_cmd(dd);
-		ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
-	}
-	return ret;
-}
-
-/*
- * Returns the "i2c type". This is a pointer to a struct that describes
- * the I2C chain on this board. To minimize impact on struct ipath_devdata,
- * the (small integer) index into the table is actually memoized, rather
- * than the pointer.
- * Memoization is because the type is determined on the first call per chip.
- * An alternative would be to move type determination to early
- * init code.
- */
-static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
-{
-	int idx;
-
-	/* Get memoized index, from previous successful probes */
-	idx = dd->ipath_i2c_chain_type - 1;
-	if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
-		goto done;
-
-	idx = 0;
-	while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
-		/* if probe succeeds, this is type */
-		if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
-			break;
-		++idx;
-	}
-
-	/*
-	 * Old EEPROM (first entry) may require a reset after probe,
-	 * rather than being able to "start" after "stop"
-	 */
-	if (idx == 0)
-		eeprom_reset(dd);
-
-	if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
-		idx = -1;
-	else
-		dd->ipath_i2c_chain_type = idx + 1;
-done:
-	return (idx >= 0) ? i2c_chains + idx : NULL;
-}
-
-static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
-					u8 eeprom_offset, void *buffer, int len)
-{
-	int ret;
-	struct i2c_chain_desc *icd;
-	u8 *bp = buffer;
-
-	ret = 1;
-	icd = ipath_i2c_type(dd);
-	if (!icd)
-		goto bail;
-
-	if (icd->eeprom_dev == IPATH_NO_DEV) {
-		/* legacy not-really-I2C */
-		ipath_cdbg(VERBOSE, "Start command only address\n");
-		eeprom_offset = (eeprom_offset << 1) | READ_CMD;
-		ret = i2c_startcmd(dd, eeprom_offset);
-	} else {
-		/* Actual I2C */
-		ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
-		if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
-			ipath_dbg("Failed EEPROM startcmd\n");
-			stop_cmd(dd);
-			ret = 1;
-			goto bail;
-		}
-		ret = wr_byte(dd, eeprom_offset);
-		stop_cmd(dd);
-		if (ret) {
-			ipath_dev_err(dd, "Failed to write EEPROM address\n");
-			ret = 1;
-			goto bail;
-		}
-		ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
-	}
-	if (ret) {
-		ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
-		stop_cmd(dd);
-		ret = 1;
-		goto bail;
-	}
-
-	/*
-	 * eeprom keeps clocking data out as long as we ack, automatically
-	 * incrementing the address.
-	 */
-	while (len-- > 0) {
-		/* get and store data */
-		*bp++ = rd_byte(dd);
-		/* send ack if not the last byte */
-		if (len)
-			send_ack(dd);
-	}
-
-	stop_cmd(dd);
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
-				       const void *buffer, int len)
-{
-	int sub_len;
-	const u8 *bp = buffer;
-	int max_wait_time, i;
-	int ret;
-	struct i2c_chain_desc *icd;
-
-	ret = 1;
-	icd = ipath_i2c_type(dd);
-	if (!icd)
-		goto bail;
-
-	while (len > 0) {
-		if (icd->eeprom_dev == IPATH_NO_DEV) {
-			if (i2c_startcmd(dd,
-					 (eeprom_offset << 1) | WRITE_CMD)) {
-				ipath_dbg("Failed to start cmd offset %u\n",
-					eeprom_offset);
-				goto failed_write;
-			}
-		} else {
-			/* Real I2C */
-			if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
-				ipath_dbg("Failed EEPROM startcmd\n");
-				goto failed_write;
-			}
-			ret = wr_byte(dd, eeprom_offset);
-			if (ret) {
-				ipath_dev_err(dd, "Failed to write EEPROM "
-					      "address\n");
-				goto failed_write;
-			}
-		}
-
-		sub_len = min(len, 4);
-		eeprom_offset += sub_len;
-		len -= sub_len;
-
-		for (i = 0; i < sub_len; i++) {
-			if (wr_byte(dd, *bp++)) {
-				ipath_dbg("no ack after byte %u/%u (%u "
-					  "total remain)\n", i, sub_len,
-					  len + sub_len - i);
-				goto failed_write;
-			}
-		}
-
-		stop_cmd(dd);
-
-		/*
-		 * wait for write complete by waiting for a successful
-		 * read (the chip replies with a zero after the write
-		 * cmd completes, and before it writes to the eeprom).
-		 * The startcmd for the read will fail the ack until
-		 * the writes have completed.   We do this inline to avoid
-		 * the debug prints that are in the real read routine
-		 * if the startcmd fails.
-		 * We also use the proper device address, so it doesn't matter
-		 * whether we have real eeprom_dev. legacy likes any address.
-		 */
-		max_wait_time = 100;
-		while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
-			stop_cmd(dd);
-			if (!--max_wait_time) {
-				ipath_dbg("Did not get successful read to "
-					  "complete write\n");
-				goto failed_write;
-			}
-		}
-		/* now read (and ignore) the resulting byte */
-		rd_byte(dd);
-		stop_cmd(dd);
-	}
-
-	ret = 0;
-	goto bail;
-
-failed_write:
-	stop_cmd(dd);
-	ret = 1;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: address to read from
- * @buffer: where to store result
- * @len: number of bytes to receive
- */
-int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
-			void *buff, int len)
-{
-	int ret;
-
-	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-	if (!ret) {
-		ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
-		mutex_unlock(&dd->ipath_eep_lock);
-	}
-
-	return ret;
-}
-
-/**
- * ipath_eeprom_write - writes data to the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: where to place data
- * @buffer: data to write
- * @len: number of bytes to write
- */
-int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
-			const void *buff, int len)
-{
-	int ret;
-
-	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-	if (!ret) {
-		ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
-		mutex_unlock(&dd->ipath_eep_lock);
-	}
-
-	return ret;
-}
-
-static u8 flash_csum(struct ipath_flash *ifp, int adjust)
-{
-	u8 *ip = (u8 *) ifp;
-	u8 csum = 0, len;
-
-	/*
-	 * Limit length checksummed to max length of actual data.
-	 * Checksum of erased eeprom will still be bad, but we avoid
-	 * reading past the end of the buffer we were passed.
-	 */
-	len = ifp->if_length;
-	if (len > sizeof(struct ipath_flash))
-		len = sizeof(struct ipath_flash);
-	while (len--)
-		csum += *ip++;
-	csum -= ifp->if_csum;
-	csum = ~csum;
-	if (adjust)
-		ifp->if_csum = csum;
-
-	return csum;
-}
-
-/**
- * ipath_get_eeprom_info - get the GUID from the i2c device
- * @dd: the infinipath device
- *
- * We have the capability to use the ipath_nguid field, and get
- * the guid from the first chip's flash, to use for all of them.
- */
-void ipath_get_eeprom_info(struct ipath_devdata *dd)
-{
-	void *buf;
-	struct ipath_flash *ifp;
-	__be64 guid;
-	int len, eep_stat;
-	u8 csum, *bguid;
-	int t = dd->ipath_unit;
-	struct ipath_devdata *dd0 = ipath_lookup(0);
-
-	if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
-		u8 oguid;
-		dd->ipath_guid = dd0->ipath_guid;
-		bguid = (u8 *) & dd->ipath_guid;
-
-		oguid = bguid[7];
-		bguid[7] += t;
-		if (oguid > bguid[7]) {
-			if (bguid[6] == 0xff) {
-				if (bguid[5] == 0xff) {
-					ipath_dev_err(
-						dd,
-						"Can't set %s GUID from "
-						"base, wraps to OUI!\n",
-						ipath_get_unit_name(t));
-					dd->ipath_guid = 0;
-					goto bail;
-				}
-				bguid[5]++;
-			}
-			bguid[6]++;
-		}
-		dd->ipath_nguid = 1;
-
-		ipath_dbg("nguid %u, so adding %u to device 0 guid, "
-			  "for %llx\n",
-			  dd0->ipath_nguid, t,
-			  (unsigned long long) be64_to_cpu(dd->ipath_guid));
-		goto bail;
-	}
-
-	/*
-	 * read full flash, not just currently used part, since it may have
-	 * been written with a newer definition
-	 */
-	len = sizeof(struct ipath_flash);
-	buf = vmalloc(len);
-	if (!buf) {
-		ipath_dev_err(dd, "Couldn't allocate memory to read %u "
-			      "bytes from eeprom for GUID\n", len);
-		goto bail;
-	}
-
-	mutex_lock(&dd->ipath_eep_lock);
-	eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
-	mutex_unlock(&dd->ipath_eep_lock);
-
-	if (eep_stat) {
-		ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
-		goto done;
-	}
-	ifp = (struct ipath_flash *)buf;
-
-	csum = flash_csum(ifp, 0);
-	if (csum != ifp->if_csum) {
-		dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
-			 "0x%x, not 0x%x\n", csum, ifp->if_csum);
-		goto done;
-	}
-	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
-	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
-		ipath_dev_err(dd, "Invalid GUID %llx from flash; "
-			      "ignoring\n",
-			      *(unsigned long long *) ifp->if_guid);
-		/* don't allow GUID if all 0 or all 1's */
-		goto done;
-	}
-
-	/* complain, but allow it */
-	if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
-		dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
-			 "default, probably not correct!\n",
-			 *(unsigned long long *) ifp->if_guid);
-
-	bguid = ifp->if_guid;
-	if (!bguid[0] && !bguid[1] && !bguid[2]) {
-		/* original incorrect GUID format in flash; fix in
-		 * core copy, by shifting up 2 octets; don't need to
-		 * change top octet, since both it and shifted are
-		 * 0.. */
-		bguid[1] = bguid[3];
-		bguid[2] = bguid[4];
-		bguid[3] = bguid[4] = 0;
-		guid = *(__be64 *) ifp->if_guid;
-		ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
-			   "shifting 2 octets\n");
-	} else
-		guid = *(__be64 *) ifp->if_guid;
-	dd->ipath_guid = guid;
-	dd->ipath_nguid = ifp->if_numguid;
-	/*
-	 * Things are slightly complicated by the desire to transparently
-	 * support both the Pathscale 10-digit serial number and the QLogic
-	 * 13-character version.
-	 */
-	if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
-		&& ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
-		/* This board has a Serial-prefix, which is stored
-		 * elsewhere for backward-compatibility.
-		 */
-		char *snp = dd->ipath_serial;
-		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-		snp[sizeof ifp->if_sprefix] = '\0';
-		len = strlen(snp);
-		snp += len;
-		len = (sizeof dd->ipath_serial) - len;
-		if (len > sizeof ifp->if_serial) {
-			len = sizeof ifp->if_serial;
-		}
-		memcpy(snp, ifp->if_serial, len);
-	} else
-		memcpy(dd->ipath_serial, ifp->if_serial,
-		       sizeof ifp->if_serial);
-	if (!strstr(ifp->if_comment, "Tested successfully"))
-		ipath_dev_err(dd, "Board SN %s did not pass functional "
-			"test: %s\n", dd->ipath_serial,
-			ifp->if_comment);
-
-	ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
-		   (unsigned long long) be64_to_cpu(dd->ipath_guid));
-
-	memcpy(&dd->ipath_eep_st_errs, &ifp->if_errcntp, IPATH_EEP_LOG_CNT);
-	/*
-	 * Power-on (actually "active") hours are kept as little-endian value
-	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-	 * atomic_t while running.
-	 */
-	atomic_set(&dd->ipath_active_time, 0);
-	dd->ipath_eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
-done:
-	vfree(buf);
-
-bail:;
-}
-
-/**
- * ipath_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the infinipath device
- *
- * Although the time is kept as seconds in the ipath_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct ipath_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-
-int ipath_update_eeprom_log(struct ipath_devdata *dd)
-{
-	void *buf;
-	struct ipath_flash *ifp;
-	int len, hi_water;
-	uint32_t new_time, new_hrs;
-	u8 csum;
-	int ret, idx;
-	unsigned long flags;
-
-	/* first, check if we actually need to do anything. */
-	ret = 0;
-	for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-		if (dd->ipath_eep_st_new_errs[idx]) {
-			ret = 1;
-			break;
-		}
-	}
-	new_time = atomic_read(&dd->ipath_active_time);
-
-	if (ret == 0 && new_time < 3600)
-		return 0;
-
-	/*
-	 * The quick-check above determined that there is something worthy
-	 * of logging, so get current contents and do a more detailed idea.
-	 * read full flash, not just currently used part, since it may have
-	 * been written with a newer definition
-	 */
-	len = sizeof(struct ipath_flash);
-	buf = vmalloc(len);
-	ret = 1;
-	if (!buf) {
-		ipath_dev_err(dd, "Couldn't allocate memory to read %u "
-				"bytes from eeprom for logging\n", len);
-		goto bail;
-	}
-
-	/* Grab semaphore and read current EEPROM. If we get an
-	 * error, let go, but if not, keep it until we finish write.
-	 */
-	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-	if (ret) {
-		ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-		goto free_bail;
-	}
-	ret = ipath_eeprom_internal_read(dd, 0, buf, len);
-	if (ret) {
-		mutex_unlock(&dd->ipath_eep_lock);
-		ipath_dev_err(dd, "Unable to read EEPROM for logging\n");
-		goto free_bail;
-	}
-	ifp = (struct ipath_flash *)buf;
-
-	csum = flash_csum(ifp, 0);
-	if (csum != ifp->if_csum) {
-		mutex_unlock(&dd->ipath_eep_lock);
-		ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-				csum, ifp->if_csum);
-		ret = 1;
-		goto free_bail;
-	}
-	hi_water = 0;
-	spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-	for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-		int new_val = dd->ipath_eep_st_new_errs[idx];
-		if (new_val) {
-			/*
-			 * If we have seen any errors, add to EEPROM values
-			 * We need to saturate at 0xFF (255) and we also
-			 * would need to adjust the checksum if we were
-			 * trying to minimize EEPROM traffic
-			 * Note that we add to actual current count in EEPROM,
-			 * in case it was altered while we were running.
-			 */
-			new_val += ifp->if_errcntp[idx];
-			if (new_val > 0xFF)
-				new_val = 0xFF;
-			if (ifp->if_errcntp[idx] != new_val) {
-				ifp->if_errcntp[idx] = new_val;
-				hi_water = offsetof(struct ipath_flash,
-						if_errcntp) + idx;
-			}
-			/*
-			 * update our shadow (used to minimize EEPROM
-			 * traffic), to match what we are about to write.
-			 */
-			dd->ipath_eep_st_errs[idx] = new_val;
-			dd->ipath_eep_st_new_errs[idx] = 0;
-		}
-	}
-	/*
-	 * now update active-time. We would like to round to the nearest hour
-	 * but unless atomic_t are sure to be proper signed ints we cannot,
-	 * because we need to account for what we "transfer" to EEPROM and
-	 * if we log an hour at 31 minutes, then we would need to set
-	 * active_time to -29 to accurately count the _next_ hour.
-	 */
-	if (new_time >= 3600) {
-		new_hrs = new_time / 3600;
-		atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
-		new_hrs += dd->ipath_eep_hrs;
-		if (new_hrs > 0xFFFF)
-			new_hrs = 0xFFFF;
-		dd->ipath_eep_hrs = new_hrs;
-		if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-			ifp->if_powerhour[0] = new_hrs & 0xFF;
-			hi_water = offsetof(struct ipath_flash, if_powerhour);
-		}
-		if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-			ifp->if_powerhour[1] = new_hrs >> 8;
-			hi_water = offsetof(struct ipath_flash, if_powerhour)
-					+ 1;
-		}
-	}
-	/*
-	 * There is a tiny possibility that we could somehow fail to write
-	 * the EEPROM after updating our shadows, but problems from holding
-	 * the spinlock too long are a much bigger issue.
-	 */
-	spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-	if (hi_water) {
-		/* we made some change to the data, update cksum and write */
-		csum = flash_csum(ifp, 1);
-		ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
-	}
-	mutex_unlock(&dd->ipath_eep_lock);
-	if (ret)
-		ipath_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-	vfree(buf);
-bail:
-	return ret;
-
-}
-
-/**
- * ipath_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the infinipath device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever ipath_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
-{
-	uint new_val;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-	new_val = dd->ipath_eep_st_new_errs[eidx] + incr;
-	if (new_val > 255)
-		new_val = 255;
-	dd->ipath_eep_st_new_errs[eidx] = new_val;
-	spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-	return;
-}
-
-static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
-{
-	int ret;
-	struct i2c_chain_desc *icd;
-
-	ret = -ENOENT;
-
-	icd = ipath_i2c_type(dd);
-	if (!icd)
-		goto bail;
-
-	if (icd->temp_dev == IPATH_NO_DEV) {
-		/* tempsense only exists on new, real-I2C boards */
-		ret = -ENXIO;
-		goto bail;
-	}
-
-	if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
-		ipath_dbg("Failed tempsense startcmd\n");
-		stop_cmd(dd);
-		ret = -ENXIO;
-		goto bail;
-	}
-	ret = wr_byte(dd, regnum);
-	stop_cmd(dd);
-	if (ret) {
-		ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
-			      regnum);
-		ret = -ENXIO;
-		goto bail;
-	}
-	if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
-		ipath_dbg("Failed tempsense RD startcmd\n");
-		stop_cmd(dd);
-		ret = -ENXIO;
-		goto bail;
-	}
-	/*
-	 * We can only clock out one byte per command, sensibly
-	 */
-	ret = rd_byte(dd);
-	stop_cmd(dd);
-
-bail:
-	return ret;
-}
-
-#define VALID_TS_RD_REG_MASK 0xBF
-
-/**
- * ipath_tempsense_read - read register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to read from
- *
- * returns reg contents (0..255) or < 0 for error
- */
-int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
-{
-	int ret;
-
-	if (regnum > 7)
-		return -EINVAL;
-
-	/* return a bogus value for (the one) register we do not have */
-	if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
-		return 0;
-
-	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-	if (!ret) {
-		ret = ipath_tempsense_internal_read(dd, regnum);
-		mutex_unlock(&dd->ipath_eep_lock);
-	}
-
-	/*
-	 * There are three possibilities here:
-	 * ret is actual value (0..255)
-	 * ret is -ENXIO or -EINVAL from code in this file
-	 * ret is -EINTR from mutex_lock_interruptible.
-	 */
-	return ret;
-}
-
-static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
-					  u8 regnum, u8 data)
-{
-	int ret = -ENOENT;
-	struct i2c_chain_desc *icd;
-
-	icd = ipath_i2c_type(dd);
-	if (!icd)
-		goto bail;
-
-	if (icd->temp_dev == IPATH_NO_DEV) {
-		/* tempsense only exists on new, real-I2C boards */
-		ret = -ENXIO;
-		goto bail;
-	}
-	if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
-		ipath_dbg("Failed tempsense startcmd\n");
-		stop_cmd(dd);
-		ret = -ENXIO;
-		goto bail;
-	}
-	ret = wr_byte(dd, regnum);
-	if (ret) {
-		stop_cmd(dd);
-		ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
-			      regnum);
-		ret = -ENXIO;
-		goto bail;
-	}
-	ret = wr_byte(dd, data);
-	stop_cmd(dd);
-	ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
-	if (ret) {
-		ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
-			      regnum);
-		ret = -ENXIO;
-	}
-
-bail:
-	return ret;
-}
-
-#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
-
-/**
- * ipath_tempsense_write - write register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to write
- * @data: data to write
- *
- * returns 0 for success or < 0 for error
- */
-int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
-{
-	int ret;
-
-	if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
-		return -EINVAL;
-
-	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-	if (!ret) {
-		ret = ipath_tempsense_internal_write(dd, regnum, data);
-		mutex_unlock(&dd->ipath_eep_lock);
-	}
-
-	/*
-	 * There are three possibilities here:
-	 * ret is 0 for success
-	 * ret is -ENXIO or -EINVAL from code in this file
-	 * ret is -EINTR from mutex_lock_interruptible.
-	 */
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
deleted file mode 100644
index 6187b84..0000000
--- a/drivers/staging/rdma/ipath/ipath_file_ops.c
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/swap.h>
-#include <linux/export.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/cpu.h>
-#include <linux/uio.h>
-#include <asm/pgtable.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-#include "ipath_user_sdma.h"
-
-static int ipath_open(struct inode *, struct file *);
-static int ipath_close(struct inode *, struct file *);
-static ssize_t ipath_write(struct file *, const char __user *, size_t,
-			   loff_t *);
-static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
-static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
-static int ipath_mmap(struct file *, struct vm_area_struct *);
-
-/*
- * This is really, really weird shit - write() and writev() here
- * have completely unrelated semantics.  Sucky userland ABI,
- * film at 11.
- */
-static const struct file_operations ipath_file_ops = {
-	.owner = THIS_MODULE,
-	.write = ipath_write,
-	.write_iter = ipath_write_iter,
-	.open = ipath_open,
-	.release = ipath_close,
-	.poll = ipath_poll,
-	.mmap = ipath_mmap,
-	.llseek = noop_llseek,
-};
-
-/*
- * Convert kernel virtual addresses to physical addresses so they don't
- * potentially conflict with the chip addresses used as mmap offsets.
- * It doesn't really matter what mmap offset we use as long as we can
- * interpret it correctly.
- */
-static u64 cvt_kvaddr(void *p)
-{
-	struct page *page;
-	u64 paddr = 0;
-
-	page = vmalloc_to_page(p);
-	if (page)
-		paddr = page_to_pfn(page) << PAGE_SHIFT;
-
-	return paddr;
-}
-
-static int ipath_get_base_info(struct file *fp,
-			       void __user *ubase, size_t ubase_size)
-{
-	struct ipath_portdata *pd = port_fp(fp);
-	int ret = 0;
-	struct ipath_base_info *kinfo = NULL;
-	struct ipath_devdata *dd = pd->port_dd;
-	unsigned subport_cnt;
-	int shared, master;
-	size_t sz;
-
-	subport_cnt = pd->port_subport_cnt;
-	if (!subport_cnt) {
-		shared = 0;
-		master = 0;
-		subport_cnt = 1;
-	} else {
-		shared = 1;
-		master = !subport_fp(fp);
-	}
-
-	sz = sizeof(*kinfo);
-	/* If port sharing is not requested, allow the old size structure */
-	if (!shared)
-		sz -= 7 * sizeof(u64);
-	if (ubase_size < sz) {
-		ipath_cdbg(PROC,
-			   "Base size %zu, need %zu (version mismatch?)\n",
-			   ubase_size, sz);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
-	if (kinfo == NULL) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	ret = dd->ipath_f_get_base_info(pd, kinfo);
-	if (ret < 0)
-		goto bail;
-
-	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
-	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
-	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
-	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
-	/*
-	 * have to mmap whole thing
-	 */
-	kinfo->spi_rcv_egrbuftotlen =
-		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
-	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
-	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
-		pd->port_rcvegrbuf_chunks;
-	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
-	if (master)
-		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
-	/*
-	 * for this use, may be ipath_cfgports summed over all chips that
-	 * are configured and present
-	 */
-	kinfo->spi_nports = dd->ipath_cfgports;
-	/* unit (chip/board) our port is on */
-	kinfo->spi_unit = dd->ipath_unit;
-	/* for now, only a single page */
-	kinfo->spi_tid_maxsize = PAGE_SIZE;
-
-	/*
-	 * Doing this per port, and based on the skip value, etc.  This has
-	 * to be the actual buffer size, since the protocol code treats it
-	 * as an array.
-	 *
-	 * These have to be set to user addresses in the user code via mmap.
-	 * These values are used on return to user code for the mmap target
-	 * addresses only.  For 32 bit, same 44 bit address problem, so use
-	 * the physical address, not virtual.  Before 2.6.11, using the
-	 * page_address() macro worked, but in 2.6.11, even that returns the
-	 * full 64 bit address (upper bits all 1's).  So far, using the
-	 * physical addresses (or chip offsets, for chip mapping) works, but
-	 * no doubt some future kernel release will change that, and we'll be
-	 * on to yet another method of dealing with this.
-	 */
-	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
-	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
-	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
-	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
-	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
-		(void *) dd->ipath_statusp -
-		(void *) dd->ipath_pioavailregs_dma;
-	if (!shared) {
-		kinfo->spi_piocnt = pd->port_piocnt;
-		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
-		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
-			dd->ipath_ureg_align * pd->port_port;
-	} else if (master) {
-		kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
-				    (pd->port_piocnt % subport_cnt);
-		/* Master's PIO buffers are after all the slave's */
-		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
-			dd->ipath_palign *
-			(pd->port_piocnt - kinfo->spi_piocnt);
-	} else {
-		unsigned slave = subport_fp(fp) - 1;
-
-		kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
-		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
-			dd->ipath_palign * kinfo->spi_piocnt * slave;
-	}
-
-	if (shared) {
-		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
-			dd->ipath_ureg_align * pd->port_port;
-		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
-		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
-		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
-
-		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
-			PAGE_SIZE * subport_fp(fp));
-
-		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
-			pd->port_rcvhdrq_size * subport_fp(fp));
-		kinfo->spi_rcvhdr_tailaddr = 0;
-		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
-			pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
-			subport_fp(fp));
-
-		kinfo->spi_subport_uregbase =
-			cvt_kvaddr(pd->subport_uregbase);
-		kinfo->spi_subport_rcvegrbuf =
-			cvt_kvaddr(pd->subport_rcvegrbuf);
-		kinfo->spi_subport_rcvhdr_base =
-			cvt_kvaddr(pd->subport_rcvhdr_base);
-		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
-			kinfo->spi_port, kinfo->spi_runtime_flags,
-			(unsigned long long) kinfo->spi_subport_uregbase,
-			(unsigned long long) kinfo->spi_subport_rcvegrbuf,
-			(unsigned long long) kinfo->spi_subport_rcvhdr_base);
-	}
-
-	/*
-	 * All user buffers are 2KB buffers.  If we ever support
-	 * giving 4KB buffers to user processes, this will need some
-	 * work.
-	 */
-	kinfo->spi_pioindex = (kinfo->spi_piobufbase -
-		(dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
-	kinfo->spi_pioalign = dd->ipath_palign;
-
-	kinfo->spi_qpair = IPATH_KD_QP;
-	/*
-	 * user mode PIO buffers are always 2KB, even when 4KB can
-	 * be received, and sent via the kernel; this is ibmaxlen
-	 * for 2K MTU.
-	 */
-	kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
-	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
-	kinfo->spi_port = pd->port_port;
-	kinfo->spi_subport = subport_fp(fp);
-	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
-	kinfo->spi_hw_version = dd->ipath_revision;
-
-	if (master) {
-		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
-	}
-
-	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
-	if (copy_to_user(ubase, kinfo, sz))
-		ret = -EFAULT;
-
-bail:
-	kfree(kinfo);
-	return ret;
-}
-
-/**
- * ipath_tid_update - update a port TID
- * @pd: the port
- * @fp: the ipath device file
- * @ti: the TID information
- *
- * The new implementation as of Oct 2004 is that the driver assigns
- * the tid and returns it to the caller.   To make it easier to
- * catch bugs, and to reduce search time, we keep a cursor for
- * each port, walking the shadow tid array to find one that's not
- * in use.
- *
- * For now, if we can't allocate the full list, we fail, although
- * in the long run, we'll allocate as many as we can, and the
- * caller will deal with that by trying the remaining pages later.
- * That means that when we fail, we have to mark the tids as not in
- * use again, in our shadow copy.
- *
- * It's up to the caller to free the tids when they are done.
- * We'll unlock the pages as they free them.
- *
- * Also, right now we are locking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.
- */
-static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
-			    const struct ipath_tid_info *ti)
-{
-	int ret = 0, ntids;
-	u32 tid, porttid, cnt, i, tidcnt, tidoff;
-	u16 *tidlist;
-	struct ipath_devdata *dd = pd->port_dd;
-	u64 physaddr;
-	unsigned long vaddr;
-	u64 __iomem *tidbase;
-	unsigned long tidmap[8];
-	struct page **pagep = NULL;
-	unsigned subport = subport_fp(fp);
-
-	if (!dd->ipath_pageshadow) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	cnt = ti->tidcnt;
-	if (!cnt) {
-		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
-			  (unsigned long long) ti->tidlist);
-		/*
-		 * Should we treat as success?  likely a bug
-		 */
-		ret = -EFAULT;
-		goto done;
-	}
-	porttid = pd->port_port * dd->ipath_rcvtidcnt;
-	if (!pd->port_subport_cnt) {
-		tidcnt = dd->ipath_rcvtidcnt;
-		tid = pd->port_tidcursor;
-		tidoff = 0;
-	} else if (!subport) {
-		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
-			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
-		tidoff = dd->ipath_rcvtidcnt - tidcnt;
-		porttid += tidoff;
-		tid = tidcursor_fp(fp);
-	} else {
-		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
-		tidoff = tidcnt * (subport - 1);
-		porttid += tidoff;
-		tid = tidcursor_fp(fp);
-	}
-	if (cnt > tidcnt) {
-		/* make sure it all fits in port_tid_pg_list */
-		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
-			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
-		cnt = tidcnt;
-	}
-	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
-	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];
-
-	memset(tidmap, 0, sizeof(tidmap));
-	/* before decrement; chip actual # */
-	ntids = tidcnt;
-	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
-				   dd->ipath_rcvtidbase +
-				   porttid * sizeof(*tidbase));
-
-	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
-		   pd->port_port, cnt, tid, tidbase);
-
-	/* virtual address of first page in transfer */
-	vaddr = ti->tidvaddr;
-	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
-		       cnt * PAGE_SIZE)) {
-		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
-			  (void *)vaddr, cnt);
-		ret = -EFAULT;
-		goto done;
-	}
-	ret = ipath_get_user_pages(vaddr, cnt, pagep);
-	if (ret) {
-		if (ret == -EBUSY) {
-			ipath_dbg("Failed to lock addr %p, %u pages "
-				  "(already locked)\n",
-				  (void *) vaddr, cnt);
-			/*
-			 * for now, continue, and see what happens but with
-			 * the new implementation, this should never happen,
-			 * unless perhaps the user has mpin'ed the pages
-			 * themselves (something we need to test)
-			 */
-			ret = 0;
-		} else {
-			dev_info(&dd->pcidev->dev,
-				 "Failed to lock addr %p, %u pages: "
-				 "errno %d\n", (void *) vaddr, cnt, -ret);
-			goto done;
-		}
-	}
-	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
-		for (; ntids--; tid++) {
-			if (tid == tidcnt)
-				tid = 0;
-			if (!dd->ipath_pageshadow[porttid + tid])
-				break;
-		}
-		if (ntids < 0) {
-			/*
-			 * oops, wrapped all the way through their TIDs,
-			 * and didn't have enough free; see comments at
-			 * start of routine
-			 */
-			ipath_dbg("Not enough free TIDs for %u pages "
-				  "(index %d), failing\n", cnt, i);
-			i--;	/* last tidlist[i] not filled in */
-			ret = -ENOMEM;
-			break;
-		}
-		tidlist[i] = tid + tidoff;
-		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
-			   "vaddr %lx\n", i, tid + tidoff, vaddr);
-		/* we "know" system pages and TID pages are same size */
-		dd->ipath_pageshadow[porttid + tid] = pagep[i];
-		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
-			dd->pcidev, pagep[i], 0, PAGE_SIZE,
-			PCI_DMA_FROMDEVICE);
-		/*
-		 * don't need atomic or it's overhead
-		 */
-		__set_bit(tid, tidmap);
-		physaddr = dd->ipath_physshadow[porttid + tid];
-		ipath_stats.sps_pagelocks++;
-		ipath_cdbg(VERBOSE,
-			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
-			   tid, vaddr, (unsigned long long) physaddr,
-			   pagep[i]);
-		dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
-				    physaddr);
-		/*
-		 * don't check this tid in ipath_portshadow, since we
-		 * just filled it in; start with the next one.
-		 */
-		tid++;
-	}
-
-	if (ret) {
-		u32 limit;
-	cleanup:
-		/* jump here if copy out of updated info failed... */
-		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
-			  -ret, i, cnt);
-		/* same code that's in ipath_free_tid() */
-		limit = sizeof(tidmap) * BITS_PER_BYTE;
-		if (limit > tidcnt)
-			/* just in case size changes in future */
-			limit = tidcnt;
-		tid = find_first_bit((const unsigned long *)tidmap, limit);
-		for (; tid < limit; tid++) {
-			if (!test_bit(tid, tidmap))
-				continue;
-			if (dd->ipath_pageshadow[porttid + tid]) {
-				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
-					   tid);
-				dd->ipath_f_put_tid(dd, &tidbase[tid],
-						    RCVHQ_RCV_TYPE_EXPECTED,
-						    dd->ipath_tidinvalid);
-				pci_unmap_page(dd->pcidev,
-					dd->ipath_physshadow[porttid + tid],
-					PAGE_SIZE, PCI_DMA_FROMDEVICE);
-				dd->ipath_pageshadow[porttid + tid] = NULL;
-				ipath_stats.sps_pageunlocks++;
-			}
-		}
-		ipath_release_user_pages(pagep, cnt);
-	} else {
-		/*
-		 * Copy the updated array, with ipath_tid's filled in, back
-		 * to user.  Since we did the copy in already, this "should
-		 * never fail" If it does, we have to clean up...
-		 */
-		if (copy_to_user((void __user *)
-				 (unsigned long) ti->tidlist,
-				 tidlist, cnt * sizeof(*tidlist))) {
-			ret = -EFAULT;
-			goto cleanup;
-		}
-		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-				 tidmap, sizeof tidmap)) {
-			ret = -EFAULT;
-			goto cleanup;
-		}
-		if (tid == tidcnt)
-			tid = 0;
-		if (!pd->port_subport_cnt)
-			pd->port_tidcursor = tid;
-		else
-			tidcursor_fp(fp) = tid;
-	}
-
-done:
-	if (ret)
-		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
-			  ti->tidcnt, -ret);
-	return ret;
-}
-
-/**
- * ipath_tid_free - free a port TID
- * @pd: the port
- * @subport: the subport
- * @ti: the TID info
- *
- * right now we are unlocking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.  We check that the TID is in range for this port
- * but otherwise don't check validity; if user has an error and
- * frees the wrong tid, it's only their own data that can thereby
- * be corrupted.  We do check that the TID was in use, for sanity
- * We always use our idea of the saved address, not the address that
- * they pass in to us.
- */
-
-static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
-			  const struct ipath_tid_info *ti)
-{
-	int ret = 0;
-	u32 tid, porttid, cnt, limit, tidcnt;
-	struct ipath_devdata *dd = pd->port_dd;
-	u64 __iomem *tidbase;
-	unsigned long tidmap[8];
-
-	if (!dd->ipath_pageshadow) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-			   sizeof tidmap)) {
-		ret = -EFAULT;
-		goto done;
-	}
-
-	porttid = pd->port_port * dd->ipath_rcvtidcnt;
-	if (!pd->port_subport_cnt)
-		tidcnt = dd->ipath_rcvtidcnt;
-	else if (!subport) {
-		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
-			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
-		porttid += dd->ipath_rcvtidcnt - tidcnt;
-	} else {
-		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
-		porttid += tidcnt * (subport - 1);
-	}
-	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-				   dd->ipath_rcvtidbase +
-				   porttid * sizeof(*tidbase));
-
-	limit = sizeof(tidmap) * BITS_PER_BYTE;
-	if (limit > tidcnt)
-		/* just in case size changes in future */
-		limit = tidcnt;
-	tid = find_first_bit(tidmap, limit);
-	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
-		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
-		   limit, tid, porttid);
-	for (cnt = 0; tid < limit; tid++) {
-		/*
-		 * small optimization; if we detect a run of 3 or so without
-		 * any set, use find_first_bit again.  That's mainly to
-		 * accelerate the case where we wrapped, so we have some at
-		 * the beginning, and some at the end, and a big gap
-		 * in the middle.
-		 */
-		if (!test_bit(tid, tidmap))
-			continue;
-		cnt++;
-		if (dd->ipath_pageshadow[porttid + tid]) {
-			struct page *p;
-			p = dd->ipath_pageshadow[porttid + tid];
-			dd->ipath_pageshadow[porttid + tid] = NULL;
-			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
-				   pid_nr(pd->port_pid), tid);
-			dd->ipath_f_put_tid(dd, &tidbase[tid],
-					    RCVHQ_RCV_TYPE_EXPECTED,
-					    dd->ipath_tidinvalid);
-			pci_unmap_page(dd->pcidev,
-				dd->ipath_physshadow[porttid + tid],
-				PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			ipath_release_user_pages(&p, 1);
-			ipath_stats.sps_pageunlocks++;
-		} else
-			ipath_dbg("Unused tid %u, ignoring\n", tid);
-	}
-	if (cnt != ti->tidcnt)
-		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
-			  ti->tidcnt, cnt);
-done:
-	if (ret)
-		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
-			  ti->tidcnt, -ret);
-	return ret;
-}
-
-/**
- * ipath_set_part_key - set a partition key
- * @pd: the port
- * @key: the key
- *
- * We can have up to 4 active at a time (other than the default, which is
- * always allowed).  This is somewhat tricky, since multiple ports may set
- * the same key, so we reference count them, and clean up at exit.  All 4
- * partition keys are packed into a single infinipath register.  It's an
- * error for a process to set the same pkey multiple times.  We provide no
- * mechanism to de-allocate a pkey at this time, we may eventually need to
- * do that.  I've used the atomic operations, and no locking, and only make
- * a single pass through what's available.  This should be more than
- * adequate for some time. I'll think about spinlocks or the like if and as
- * it's necessary.
- */
-static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	int i, any = 0, pidx = -1;
-	u16 lkey = key & 0x7FFF;
-	int ret;
-
-	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
-		/* nothing to do; this key always valid */
-		ret = 0;
-		goto bail;
-	}
-
-	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
-		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
-		   pd->port_port, key, dd->ipath_pkeys[0],
-		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
-		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
-		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
-		   atomic_read(&dd->ipath_pkeyrefs[3]));
-
-	if (!lkey) {
-		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
-			   pd->port_port);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/*
-	 * Set the full membership bit, because it has to be
-	 * set in the register or the packet, and it seems
-	 * cleaner to set in the register than to force all
-	 * callers to set it. (see bug 4331)
-	 */
-	key |= 0x8000;
-
-	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-		if (!pd->port_pkeys[i] && pidx == -1)
-			pidx = i;
-		if (pd->port_pkeys[i] == key) {
-			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
-				   "(%x) more than once\n",
-				   pd->port_port, key);
-			ret = -EEXIST;
-			goto bail;
-		}
-	}
-	if (pidx == -1) {
-		ipath_dbg("All pkeys for port %u already in use, "
-			  "can't set %x\n", pd->port_port, key);
-		ret = -EBUSY;
-		goto bail;
-	}
-	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i]) {
-			any++;
-			continue;
-		}
-		if (dd->ipath_pkeys[i] == key) {
-			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];
-
-			if (atomic_inc_return(pkrefs) > 1) {
-				pd->port_pkeys[pidx] = key;
-				ipath_cdbg(VERBOSE, "p%u set key %x "
-					   "matches #%d, count now %d\n",
-					   pd->port_port, key, i,
-					   atomic_read(pkrefs));
-				ret = 0;
-				goto bail;
-			} else {
-				/*
-				 * lost race, decrement count, catch below
-				 */
-				atomic_dec(pkrefs);
-				ipath_cdbg(VERBOSE, "Lost race, count was "
-					   "0, after dec, it's %d\n",
-					   atomic_read(pkrefs));
-				any++;
-			}
-		}
-		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-			/*
-			 * It makes no sense to have both the limited and
-			 * full membership PKEY set at the same time since
-			 * the unlimited one will disable the limited one.
-			 */
-			ret = -EEXIST;
-			goto bail;
-		}
-	}
-	if (!any) {
-		ipath_dbg("port %u, all pkeys already in use, "
-			  "can't set %x\n", pd->port_port, key);
-		ret = -EBUSY;
-		goto bail;
-	}
-	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i] &&
-		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-			u64 pkey;
-
-			/* for ipathstats, etc. */
-			ipath_stats.sps_pkeys[i] = lkey;
-			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
-			pkey =
-				(u64) dd->ipath_pkeys[0] |
-				((u64) dd->ipath_pkeys[1] << 16) |
-				((u64) dd->ipath_pkeys[2] << 32) |
-				((u64) dd->ipath_pkeys[3] << 48);
-			ipath_cdbg(PROC, "p%u set key %x in #%d, "
-				   "portidx %d, new pkey reg %llx\n",
-				   pd->port_port, key, i, pidx,
-				   (unsigned long long) pkey);
-			ipath_write_kreg(
-				dd, dd->ipath_kregs->kr_partitionkey, pkey);
-
-			ret = 0;
-			goto bail;
-		}
-	}
-	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
-		  "can't set %x\n", pd->port_port, key);
-	ret = -EBUSY;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_manage_rcvq - manage a port's receive queue
- * @pd: the port
- * @subport: the subport
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the port, for use in queue
- * overflow conditions.  start_stop==1 re-enables, to be used to
- * re-init the software copy of the head register
- */
-static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
-			     int start_stop)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-
-	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
-		   start_stop ? "en" : "dis", dd->ipath_unit,
-		   pd->port_port, subport);
-	if (subport)
-		goto bail;
-	/* atomically clear receive enable port. */
-	if (start_stop) {
-		/*
-		 * On enable, force in-memory copy of the tail register to
-		 * 0, so that protocol code doesn't have to worry about
-		 * whether or not the chip has yet updated the in-memory
-		 * copy or not on return from the system call. The chip
-		 * always resets its tail register back to 0 on a
-		 * transition from disabled to enabled.  This could cause a
-		 * problem if software was broken, and did the enable w/o
-		 * the disable, but eventually the in-memory copy will be
-		 * updated and correct itself, even in the face of software
-		 * bugs.
-		 */
-		if (pd->port_rcvhdrtail_kvaddr)
-			ipath_clear_rcvhdrtail(pd);
-		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
-			&dd->ipath_rcvctrl);
-	} else
-		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
-			  &dd->ipath_rcvctrl);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-			 dd->ipath_rcvctrl);
-	/* now be sure chip saw it before we return */
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	if (start_stop) {
-		/*
-		 * And try to be sure that tail reg update has happened too.
-		 * This should in theory interlock with the RXE changes to
-		 * the tail register.  Don't assign it to the tail register
-		 * in memory copy, since we could overwrite an update by the
-		 * chip if we did.
-		 */
-		ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-	}
-	/* always; new head should be equal to new tail; see above */
-bail:
-	return 0;
-}
-
-static void ipath_clean_part_key(struct ipath_portdata *pd,
-				 struct ipath_devdata *dd)
-{
-	int i, j, pchanged = 0;
-	u64 oldpkey;
-
-	/* for debugging only */
-	oldpkey = (u64) dd->ipath_pkeys[0] |
-		((u64) dd->ipath_pkeys[1] << 16) |
-		((u64) dd->ipath_pkeys[2] << 32) |
-		((u64) dd->ipath_pkeys[3] << 48);
-
-	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-		if (!pd->port_pkeys[i])
-			continue;
-		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
-			   pd->port_pkeys[i]);
-		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
-			/* check for match independent of the global bit */
-			if ((dd->ipath_pkeys[j] & 0x7fff) !=
-			    (pd->port_pkeys[i] & 0x7fff))
-				continue;
-			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
-				ipath_cdbg(VERBOSE, "p%u clear key "
-					   "%x matches #%d\n",
-					   pd->port_port,
-					   pd->port_pkeys[i], j);
-				ipath_stats.sps_pkeys[j] =
-					dd->ipath_pkeys[j] = 0;
-				pchanged++;
-			} else {
-				ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
-					   "but ref still %d\n", pd->port_port,
-					   pd->port_pkeys[i], j,
-					   atomic_read(&dd->ipath_pkeyrefs[j]));
-				break;
-			}
-		}
-		pd->port_pkeys[i] = 0;
-	}
-	if (pchanged) {
-		u64 pkey = (u64) dd->ipath_pkeys[0] |
-			((u64) dd->ipath_pkeys[1] << 16) |
-			((u64) dd->ipath_pkeys[2] << 32) |
-			((u64) dd->ipath_pkeys[3] << 48);
-		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
-			   "new pkey reg %llx\n", pd->port_port,
-			   (unsigned long long) oldpkey,
-			   (unsigned long long) pkey);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-				 pkey);
-	}
-}
-
-/*
- * Initialize the port data with the receive buffer sizes
- * so this can be done while the master port is locked.
- * Otherwise, there is a race with a slave opening the port
- * and seeing these fields uninitialized.
- */
-static void init_user_egr_sizes(struct ipath_portdata *pd)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	unsigned egrperchunk, egrcnt, size;
-
-	/*
-	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
-	 * physically contiguous memory, advance through it until used up
-	 * and then allocate more.  Of course, we need memory to store those
-	 * extra pointers, now.  Started out with 256KB, but under heavy
-	 * memory pressure (creating large files and then copying them over
-	 * NFS while doing lots of MPI jobs), we hit some allocation
-	 * failures, even though we can sleep...  (2.6.10) Still get
-	 * failures at 64K.  32K is the lowest we can go without wasting
-	 * additional memory.
-	 */
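-	/*
-	 * Illustration (made-up numbers): with 2048-byte eager buffers
-	 * and 32KB chunks, egrperchunk is 16, so an egrcnt of 100 needs
-	 * (100 + 16 - 1) / 16 = 7 chunks.
-	 */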
-	size = 0x8000;
-	egrperchunk = size / dd->ipath_rcvegrbufsize;
-	egrcnt = dd->ipath_rcvegrcnt;
-	pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
-	pd->port_rcvegrbufs_perchunk = egrperchunk;
-	pd->port_rcvegrbuf_size = size;
-}
-
-/**
- * ipath_create_user_egr - allocate eager TID buffers
- * @pd: the port to allocate TID buffers for
- *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skbs for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous; we do multiple
 * allocation calls.
- */
-static int ipath_create_user_egr(struct ipath_portdata *pd)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
-	size_t size;
-	int ret;
-	gfp_t gfp_flags;
-
-	/*
-	 * GFP_USER, but without GFP_FS, so buffer cache can be
-	 * coalesced (we hope); otherwise, even at order 4,
-	 * heavy filesystem activity makes these fail.  We also
-	 * request compound pages (__GFP_COMP).
-	 */
-	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
-	egrcnt = dd->ipath_rcvegrcnt;
-	/* TID number offset for this port */
-	egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
-	egrsize = dd->ipath_rcvegrbufsize;
-	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
-		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
-
-	chunk = pd->port_rcvegrbuf_chunks;
-	egrperchunk = pd->port_rcvegrbufs_perchunk;
-	size = pd->port_rcvegrbuf_size;
-	pd->port_rcvegrbuf = kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf[0]),
-					   GFP_KERNEL);
-	if (!pd->port_rcvegrbuf) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-	pd->port_rcvegrbuf_phys =
-		kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf_phys[0]),
-			      GFP_KERNEL);
-	if (!pd->port_rcvegrbuf_phys) {
-		ret = -ENOMEM;
-		goto bail_rcvegrbuf;
-	}
-	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-
-		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
-			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
-			gfp_flags);
-
-		if (!pd->port_rcvegrbuf[e]) {
-			ret = -ENOMEM;
-			goto bail_rcvegrbuf_phys;
-		}
-	}
-
-	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
-
-	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
-		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
-		unsigned i;
-
-		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
-			dd->ipath_f_put_tid(dd, e + egroff +
-					    (u64 __iomem *)
-					    ((char __iomem *)
-					     dd->ipath_kregbase +
-					     dd->ipath_rcvegrbase),
-					    RCVHQ_RCV_TYPE_EAGER, pa);
-			pa += egrsize;
-		}
-		cond_resched();	/* don't hog the cpu */
-	}
-
-	ret = 0;
-	goto bail;
-
-bail_rcvegrbuf_phys:
-	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
-		pd->port_rcvegrbuf[e]; e++) {
-		dma_free_coherent(&dd->pcidev->dev, size,
-				  pd->port_rcvegrbuf[e],
-				  pd->port_rcvegrbuf_phys[e]);
-
-	}
-	kfree(pd->port_rcvegrbuf_phys);
-	pd->port_rcvegrbuf_phys = NULL;
-bail_rcvegrbuf:
-	kfree(pd->port_rcvegrbuf);
-	pd->port_rcvegrbuf = NULL;
-bail:
-	return ret;
-}
-
-
-/* common code for the mappings on dma_alloc_coherent mem */
-static int ipath_mmap_mem(struct vm_area_struct *vma,
-	struct ipath_portdata *pd, unsigned len, int write_ok,
-	void *kvaddr, char *what)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	unsigned long pfn;
-	int ret;
-
-	if ((vma->vm_end - vma->vm_start) > len) {
-		dev_info(&dd->pcidev->dev,
-			 "FAIL on %s: len %lx > %x\n", what,
-			 vma->vm_end - vma->vm_start, len);
-		ret = -EFAULT;
-		goto bail;
-	}
-
-	if (!write_ok) {
-		if (vma->vm_flags & VM_WRITE) {
-			dev_info(&dd->pcidev->dev,
-				 "%s must be mapped readonly\n", what);
-			ret = -EPERM;
-			goto bail;
-		}
-
-		/* don't allow them to later change with mprotect */
-		vma->vm_flags &= ~VM_MAYWRITE;
-	}
-
-	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
-	ret = remap_pfn_range(vma, vma->vm_start, pfn,
-			      len, vma->vm_page_prot);
-	if (ret)
-		dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
-			 "bytes r%c failed: %d\n", what, pd->port_port,
-			 pfn, len, write_ok?'w':'o', ret);
-	else
-		ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
-			   "r%c\n", what, pd->port_port, pfn, len,
-			   write_ok?'w':'o');
-bail:
-	return ret;
-}
-
-static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
-		     u64 ureg)
-{
-	unsigned long phys;
-	int ret;
-
-	/*
-	 * This is real hardware, so use io_remap.  This is the mechanism
-	 * for the user process to update the head registers for their port
-	 * in the chip.
-	 */
-	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
-		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
-			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
-		ret = -EFAULT;
-	} else {
-		phys = dd->ipath_physaddr + ureg;
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-		ret = io_remap_pfn_range(vma, vma->vm_start,
-					 phys >> PAGE_SHIFT,
-					 vma->vm_end - vma->vm_start,
-					 vma->vm_page_prot);
-	}
-	return ret;
-}
-
-static int mmap_piobufs(struct vm_area_struct *vma,
-			struct ipath_devdata *dd,
-			struct ipath_portdata *pd,
-			unsigned piobufs, unsigned piocnt)
-{
-	unsigned long phys;
-	int ret;
-
-	/*
-	 * When we map the PIO buffers in the chip, we want to map them as
-	 * writeonly, no read possible.   This prevents access to previous
-	 * process data, and catches users who might try to read the i/o
-	 * space due to a bug.
-	 */
-	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
-		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
-			 "reqlen %lx > piobuf space\n",
-			 vma->vm_end - vma->vm_start);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	phys = dd->ipath_physaddr + piobufs;
-
-#if defined(__powerpc__)
-	/* There isn't a generic way to specify writethrough mappings */
-	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
-	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
-#endif
-
-	/*
-	 * don't allow them to later change to readable with mprotect (for when
-	 * not initially mapped readable, as is normally the case)
-	 */
-	vma->vm_flags &= ~VM_MAYREAD;
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-
-	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
-				 vma->vm_end - vma->vm_start,
-				 vma->vm_page_prot);
-bail:
-	return ret;
-}
-
-static int mmap_rcvegrbufs(struct vm_area_struct *vma,
-			   struct ipath_portdata *pd)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	unsigned long start, size;
-	size_t total_size, i;
-	unsigned long pfn;
-	int ret;
-
-	size = pd->port_rcvegrbuf_size;
-	total_size = pd->port_rcvegrbuf_chunks * size;
-	if ((vma->vm_end - vma->vm_start) > total_size) {
-		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
-			 "reqlen %lx > actual %lx\n",
-			 vma->vm_end - vma->vm_start,
-			 (unsigned long) total_size);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	if (vma->vm_flags & VM_WRITE) {
-		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
-			 "writable (flags=%lx)\n", vma->vm_flags);
-		ret = -EPERM;
-		goto bail;
-	}
-	/* don't allow them to later change to writeable with mprotect */
-	vma->vm_flags &= ~VM_MAYWRITE;
-
-	start = vma->vm_start;
-
-	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
-		pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
-		ret = remap_pfn_range(vma, start, pfn, size,
-				      vma->vm_page_prot);
-		if (ret < 0)
-			goto bail;
-	}
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/*
- * ipath_file_vma_fault - handle a VMA page fault.
- */
-static int ipath_file_vma_fault(struct vm_area_struct *vma,
-					struct vm_fault *vmf)
-{
-	struct page *page;
-
-	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
-	if (!page)
-		return VM_FAULT_SIGBUS;
-	get_page(page);
-	vmf->page = page;
-
-	return 0;
-}
-
-static const struct vm_operations_struct ipath_file_vm_ops = {
-	.fault = ipath_file_vma_fault,
-};
-
-static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
-		       struct ipath_portdata *pd, unsigned subport)
-{
-	unsigned long len;
-	struct ipath_devdata *dd;
-	void *addr;
-	size_t size;
-	int ret = 0;
-
-	/* If the port is not shared, all addresses should be physical */
-	if (!pd->port_subport_cnt)
-		goto bail;
-
-	dd = pd->port_dd;
-	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
-
-	/*
-	 * Each process has all the subport uregbase, rcvhdrq, and
-	 * rcvegrbufs mmapped - as an array for all the processes,
-	 * and also separately for this process.
-	 */
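-	/*
-	 * Illustration (assumed 2 subports): subport_uregbase then spans
-	 * 2 * PAGE_SIZE shared pages, and subport 1's private view starts
-	 * at subport_uregbase + PAGE_SIZE.
-	 */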
-	if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
-		addr = pd->subport_uregbase;
-		size = PAGE_SIZE * pd->port_subport_cnt;
-	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
-		addr = pd->subport_rcvhdr_base;
-		size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
-	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
-		addr = pd->subport_rcvegrbuf;
-		size *= pd->port_subport_cnt;
-	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
-					PAGE_SIZE * subport)) {
-		addr = pd->subport_uregbase + PAGE_SIZE * subport;
-		size = PAGE_SIZE;
-	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
-					pd->port_rcvhdrq_size * subport)) {
-		addr = pd->subport_rcvhdr_base +
-			pd->port_rcvhdrq_size * subport;
-		size = pd->port_rcvhdrq_size;
-	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
-					size * subport)) {
-		addr = pd->subport_rcvegrbuf + size * subport;
-		/* rcvegrbufs are read-only on the slave */
-		if (vma->vm_flags & VM_WRITE) {
-			dev_info(&dd->pcidev->dev,
-				 "Can't map eager buffers as "
-				 "writable (flags=%lx)\n", vma->vm_flags);
-			ret = -EPERM;
-			goto bail;
-		}
-		/*
-		 * Don't allow permission to later change to writeable
-		 * with mprotect.
-		 */
-		vma->vm_flags &= ~VM_MAYWRITE;
-	} else {
-		goto bail;
-	}
-	len = vma->vm_end - vma->vm_start;
-	if (len > size) {
-		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
-	vma->vm_ops = &ipath_file_vm_ops;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	ret = 1;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_mmap - mmap various structures into user space
- * @fp: the file pointer
- * @vma: the VM area
- *
- * We use this to have a shared buffer between the kernel and the user code
- * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
- * buffers in the chip.  We have the open and close entries so we can bump
- * the ref count and keep the driver from being unloaded while still mapped.
- */
-static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
-{
-	struct ipath_portdata *pd;
-	struct ipath_devdata *dd;
-	u64 pgaddr, ureg;
-	unsigned piobufs, piocnt;
-	int ret;
-
-	pd = port_fp(fp);
-	if (!pd) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	dd = pd->port_dd;
-
-	/*
-	 * This is the ipath_do_user_init() code, mapping the shared buffers
-	 * into the user process. The address referred to by vm_pgoff is the
-	 * file offset passed via mmap().  For shared ports, this is the
-	 * kernel vmalloc() address of the pages to share with the master.
-	 * For non-shared or master ports, this is a physical address.
-	 * We only do one mmap for each space mapped.
-	 */
-	pgaddr = vma->vm_pgoff << PAGE_SHIFT;
-
-	/*
-	 * Check for 0 in case one of the allocations failed, but user
-	 * called mmap anyway.
-	 */
-	if (!pgaddr)  {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
-		   (unsigned long long) pgaddr, vma->vm_start,
-		   vma->vm_end - vma->vm_start, dd->ipath_unit,
-		   pd->port_port, subport_fp(fp));
-
-	/*
-	 * Physical addresses must fit in 40 bits for our hardware.
-	 * Check for kernel virtual addresses first, anything else must
-	 * match a HW or memory address.
-	 */
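-	/*
-	 * mmap_kvaddr() returns 0 if pgaddr is not one of the shared
-	 * kernel-virtual regions, a negative errno on failure, and a
-	 * positive value once it has set up the mapping itself.
-	 */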
-	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
-	if (ret) {
-		if (ret > 0)
-			ret = 0;
-		goto bail;
-	}
-
-	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
-	if (!pd->port_subport_cnt) {
-		/* port is not shared */
-		piocnt = pd->port_piocnt;
-		piobufs = pd->port_piobufs;
-	} else if (!subport_fp(fp)) {
-		/* caller is the master */
-		piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
-			 (pd->port_piocnt % pd->port_subport_cnt);
-		piobufs = pd->port_piobufs +
-			dd->ipath_palign * (pd->port_piocnt - piocnt);
-	} else {
-		unsigned slave = subport_fp(fp) - 1;
-
-		/* caller is a slave */
-		piocnt = pd->port_piocnt / pd->port_subport_cnt;
-		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
-	}
-
-	if (pgaddr == ureg)
-		ret = mmap_ureg(vma, dd, ureg);
-	else if (pgaddr == piobufs)
-		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
-	else if (pgaddr == dd->ipath_pioavailregs_phys)
-		/* in-memory copy of pioavail registers */
-		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
-				     (void *) dd->ipath_pioavailregs_dma,
-				     "pioavail registers");
-	else if (pgaddr == pd->port_rcvegr_phys)
-		ret = mmap_rcvegrbufs(vma, pd);
-	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
-		/*
-		 * The rcvhdrq itself; readonly except on HT (so have
-		 * to allow writable mapping), multiple pages, contiguous
-		 * from an i/o perspective.
-		 */
-		ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
-				     pd->port_rcvhdrq,
-				     "rcvhdrq");
-	else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
-		/* in-memory copy of rcvhdrq tail register */
-		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
-				     pd->port_rcvhdrtail_kvaddr,
-				     "rcvhdrq tail");
-	else
-		ret = -EINVAL;
-
-	vma->vm_private_data = NULL;
-
-	if (ret < 0)
-		dev_info(&dd->pcidev->dev,
-			 "Failure %d on off %llx len %lx\n",
-			 -ret, (unsigned long long)pgaddr,
-			 vma->vm_end - vma->vm_start);
-bail:
-	return ret;
-}
-
-static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
-{
-	unsigned pollflag = 0;
-
-	if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
-	    pd->port_hdrqfull != pd->port_hdrqfull_poll) {
-		pollflag |= POLLIN | POLLRDNORM;
-		pd->port_hdrqfull_poll = pd->port_hdrqfull;
-	}
-
-	return pollflag;
-}
-
-static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
-				      struct file *fp,
-				      struct poll_table_struct *pt)
-{
-	unsigned pollflag = 0;
-	struct ipath_devdata *dd;
-
-	dd = pd->port_dd;
-
-	/* variable access in ipath_poll_hdrqfull() needs this */
-	rmb();
-	pollflag = ipath_poll_hdrqfull(pd);
-
-	if (pd->port_urgent != pd->port_urgent_poll) {
-		pollflag |= POLLIN | POLLRDNORM;
-		pd->port_urgent_poll = pd->port_urgent;
-	}
-
-	if (!pollflag) {
-		/* this saves a spin_lock/unlock in interrupt handler... */
-		set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
-		/* flush waiting flag so don't miss an event... */
-		wmb();
-		poll_wait(fp, &pd->port_wait, pt);
-	}
-
-	return pollflag;
-}
-
-static unsigned int ipath_poll_next(struct ipath_portdata *pd,
-				    struct file *fp,
-				    struct poll_table_struct *pt)
-{
-	u32 head;
-	u32 tail;
-	unsigned pollflag = 0;
-	struct ipath_devdata *dd;
-
-	dd = pd->port_dd;
-
-	/* variable access in ipath_poll_hdrqfull() needs this */
-	rmb();
-	pollflag = ipath_poll_hdrqfull(pd);
-
-	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
-	if (pd->port_rcvhdrtail_kvaddr)
-		tail = ipath_get_rcvhdrtail(pd);
-	else
-		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-
-	if (head != tail)
-		pollflag |= POLLIN | POLLRDNORM;
-	else {
-		/* this saves a spin_lock/unlock in interrupt handler */
-		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
-		/* flush waiting flag so we don't miss an event */
-		wmb();
-
-		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
-			&dd->ipath_rcvctrl);
-
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-				 dd->ipath_rcvctrl);
-
-		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
-			ipath_write_ureg(dd, ur_rcvhdrhead,
-					 dd->ipath_rhdrhead_intr_off | head,
-					 pd->port_port);
-
-		poll_wait(fp, &pd->port_wait, pt);
-	}
-
-	return pollflag;
-}
-
-static unsigned int ipath_poll(struct file *fp,
-			       struct poll_table_struct *pt)
-{
-	struct ipath_portdata *pd;
-	unsigned pollflag;
-
-	pd = port_fp(fp);
-	if (!pd)
-		pollflag = 0;
-	else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
-		pollflag = ipath_poll_urgent(pd, fp, pt);
-	else
-		pollflag = ipath_poll_next(pd, fp, pt);
-
-	return pollflag;
-}
-
-static int ipath_supports_subports(int user_swmajor, int user_swminor)
-{
-	/* no subport implementation prior to software version 1.3 */
-	return (user_swmajor > 1) || (user_swminor >= 3);
-}
-
-static int ipath_compatible_subports(int user_swmajor, int user_swminor)
-{
-	/* this code is written long-hand for clarity */
-	if (IPATH_USER_SWMAJOR != user_swmajor) {
-		/* no promise of compatibility if major mismatch */
-		return 0;
-	}
-	if (IPATH_USER_SWMAJOR == 1) {
-		switch (IPATH_USER_SWMINOR) {
-		case 0:
-		case 1:
-		case 2:
-			/* no subport implementation so cannot be compatible */
-			return 0;
-		case 3:
-			/* 3 is only compatible with itself */
-			return user_swminor == 3;
-		default:
-			/* >= 4 are compatible (or are expected to be) */
-			return user_swminor >= 4;
-		}
-	}
-	/* make no promises yet for future major versions */
-	return 0;
-}
-
-static int init_subports(struct ipath_devdata *dd,
-			 struct ipath_portdata *pd,
-			 const struct ipath_user_info *uinfo)
-{
-	int ret = 0;
-	unsigned num_subports;
-	size_t size;
-
-	/*
-	 * If the user is requesting zero subports,
-	 * skip the subport allocation.
-	 */
-	if (uinfo->spu_subport_cnt <= 0)
-		goto bail;
-
-	/* Self-consistency check for ipath_compatible_subports() */
-	if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
-	    !ipath_compatible_subports(IPATH_USER_SWMAJOR,
-				       IPATH_USER_SWMINOR)) {
-		dev_info(&dd->pcidev->dev,
-			 "Inconsistent ipath_compatible_subports()\n");
-		goto bail;
-	}
-
-	/* Check for subport compatibility */
-	if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
-				       uinfo->spu_userversion & 0xffff)) {
-		dev_info(&dd->pcidev->dev,
-			 "Mismatched user version (%d.%d) and driver "
-			 "version (%d.%d) while port sharing. Ensure "
-			 "that driver and library are from the same "
-			 "release.\n",
-			 (int) (uinfo->spu_userversion >> 16),
-			 (int) (uinfo->spu_userversion & 0xffff),
-			 IPATH_USER_SWMAJOR,
-			 IPATH_USER_SWMINOR);
-		goto bail;
-	}
-	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	num_subports = uinfo->spu_subport_cnt;
-	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
-	if (!pd->subport_uregbase) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
-	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-		     sizeof(u32), PAGE_SIZE) * num_subports;
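-	/*
-	 * Sizing example (illustrative values only): rcvhdrcnt of 1024
-	 * with a 16-word rcvhdrentsize gives 1024 * 16 * 4 = 64KB per
-	 * subport, already page aligned, so we vzalloc 64KB * num_subports.
-	 */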
-	pd->subport_rcvhdr_base = vzalloc(size);
-	if (!pd->subport_rcvhdr_base) {
-		ret = -ENOMEM;
-		goto bail_ureg;
-	}
-
-	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
-					pd->port_rcvegrbuf_size *
-					num_subports);
-	if (!pd->subport_rcvegrbuf) {
-		ret = -ENOMEM;
-		goto bail_rhdr;
-	}
-
-	pd->port_subport_cnt = uinfo->spu_subport_cnt;
-	pd->port_subport_id = uinfo->spu_subport_id;
-	pd->active_slaves = 1;
-	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-	goto bail;
-
-bail_rhdr:
-	vfree(pd->subport_rcvhdr_base);
-bail_ureg:
-	vfree(pd->subport_uregbase);
-	pd->subport_uregbase = NULL;
-bail:
-	return ret;
-}
-
-static int try_alloc_port(struct ipath_devdata *dd, int port,
-			  struct file *fp,
-			  const struct ipath_user_info *uinfo)
-{
-	struct ipath_portdata *pd;
-	int ret;
-
-	if (!(pd = dd->ipath_pd[port])) {
-		void *ptmp;
-
-		pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
-
-		/*
-		 * Allocate memory for use in ipath_tid_update() just once
-		 * at open, not per call.  Reduces cost of expected send
-		 * setup.
-		 */
-		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
-			       dd->ipath_rcvtidcnt * sizeof(struct page **),
-			       GFP_KERNEL);
-		if (!pd || !ptmp) {
-			ipath_dev_err(dd, "Unable to allocate portdata "
-				      "memory, failing open\n");
-			ret = -ENOMEM;
-			kfree(pd);
-			kfree(ptmp);
-			goto bail;
-		}
-		dd->ipath_pd[port] = pd;
-		dd->ipath_pd[port]->port_port = port;
-		dd->ipath_pd[port]->port_dd = dd;
-		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
-		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
-	}
-	if (!pd->port_cnt) {
-		pd->userversion = uinfo->spu_userversion;
-		init_user_egr_sizes(pd);
-		if ((ret = init_subports(dd, pd, uinfo)) != 0)
-			goto bail;
-		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
-			   current->comm, current->pid, dd->ipath_unit,
-			   port);
-		pd->port_cnt = 1;
-		port_fp(fp) = pd;
-		pd->port_pid = get_pid(task_pid(current));
-		strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
-		ipath_stats.sps_ports++;
-		ret = 0;
-	} else
-		ret = -EBUSY;
-
-bail:
-	return ret;
-}
-
-static inline int usable(struct ipath_devdata *dd)
-{
-	return dd &&
-		(dd->ipath_flags & IPATH_PRESENT) &&
-		dd->ipath_kregbase &&
-		dd->ipath_lid &&
-		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
-				     | IPATH_LINKUNK));
-}
-
-static int find_free_port(int unit, struct file *fp,
-			  const struct ipath_user_info *uinfo)
-{
-	struct ipath_devdata *dd = ipath_lookup(unit);
-	int ret, i;
-
-	if (!dd) {
-		ret = -ENODEV;
-		goto bail;
-	}
-
-	if (!usable(dd)) {
-		ret = -ENETDOWN;
-		goto bail;
-	}
-
-	for (i = 1; i < dd->ipath_cfgports; i++) {
-		ret = try_alloc_port(dd, i, fp, uinfo);
-		if (ret != -EBUSY)
-			goto bail;
-	}
-	ret = -EBUSY;
-
-bail:
-	return ret;
-}
-
-static int find_best_unit(struct file *fp,
-			  const struct ipath_user_info *uinfo)
-{
-	int ret = 0, i, prefunit = -1, devmax;
-	int maxofallports, npresent, nup;
-	int ndev;
-
-	devmax = ipath_count_units(&npresent, &nup, &maxofallports);
-
-	/*
-	 * This code is present to allow a knowledgeable person to
-	 * specify the layout of processes to processors before opening
-	 * this driver, and then we'll assign the process to the "closest"
-	 * InfiniPath chip to that processor (we assume reasonable connectivity,
-	 * for now).  This code assumes that if affinity has been set
-	 * before this point, at most one cpu is set; for now this
-	 * is reasonable.  I check for both cpumask_empty() and cpumask_full(),
-	 * in case some kernel variant sets none of the bits when no
-	 * affinity is set.  2.6.11 and 12 kernels have all present
-	 * cpus set.  Some day we'll have to fix it up further to handle
-	 * a cpu subset.  This algorithm fails for two HT chips connected
-	 * in tunnel fashion.  Eventually this needs real topology
-	 * information.  There may be some issues with dual core numbering
-	 * as well.  This needs more work prior to release.
-	 */
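-	/*
-	 * E.g. (illustrative numbers): with 8 online cpus, 2 chips
-	 * present, and the process pinned to cpu 5, the preferred unit
-	 * computed below is 5 / (8 / 2) = 1.
-	 */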
-	if (!cpumask_empty(tsk_cpus_allowed(current)) &&
-	    !cpumask_full(tsk_cpus_allowed(current))) {
-		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
-		get_online_cpus();
-		for_each_online_cpu(i)
-			if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
-				ipath_cdbg(PROC, "%s[%u] affinity set for "
-					   "cpu %d/%d\n", current->comm,
-					   current->pid, i, ncpus);
-				curcpu = i;
-				nset++;
-			}
-		put_online_cpus();
-		if (curcpu != -1 && nset != ncpus) {
-			if (npresent) {
-				prefunit = curcpu / (ncpus / npresent);
-				ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
-					  "%d cpus/chip, select unit %d\n",
-					  current->comm, current->pid,
-					  npresent, ncpus, ncpus / npresent,
-					  prefunit);
-			}
-		}
-	}
-
-	/*
-	 * User ports start at 1; the kernel port is 0.
-	 * For now, we do round-robin access across all chips.
-	 */
-
-	if (prefunit != -1)
-		devmax = prefunit + 1;
-recheck:
-	for (i = 1; i < maxofallports; i++) {
-		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
-		     ndev++) {
-			struct ipath_devdata *dd = ipath_lookup(ndev);
-
-			if (!usable(dd))
-				continue; /* can't use this unit */
-			if (i >= dd->ipath_cfgports)
-				/*
-				 * Maxed out on users of this unit. Try
-				 * next.
-				 */
-				continue;
-			ret = try_alloc_port(dd, i, fp, uinfo);
-			if (!ret)
-				goto done;
-		}
-	}
-
-	if (npresent) {
-		if (nup == 0) {
-			ret = -ENETDOWN;
-			ipath_dbg("No ports available (none initialized "
-				  "and ready)\n");
-		} else {
-			if (prefunit > 0) {
-				/* if started above 0, retry from 0 */
-				ipath_cdbg(PROC,
-					   "%s[%u] no ports on prefunit "
-					   "%d, clear and re-check\n",
-					   current->comm, current->pid,
-					   prefunit);
-				devmax = ipath_count_units(NULL, NULL,
-							   NULL);
-				prefunit = -1;
-				goto recheck;
-			}
-			ret = -EBUSY;
-			ipath_dbg("No ports available\n");
-		}
-	} else {
-		ret = -ENXIO;
-		ipath_dbg("No boards found\n");
-	}
-
-done:
-	return ret;
-}
-
-static int find_shared_port(struct file *fp,
-			    const struct ipath_user_info *uinfo)
-{
-	int devmax, ndev, i;
-	int ret = 0;
-
-	devmax = ipath_count_units(NULL, NULL, NULL);
-
-	for (ndev = 0; ndev < devmax; ndev++) {
-		struct ipath_devdata *dd = ipath_lookup(ndev);
-
-		if (!usable(dd))
-			continue;
-		for (i = 1; i < dd->ipath_cfgports; i++) {
-			struct ipath_portdata *pd = dd->ipath_pd[i];
-
-			/* Skip ports which are not yet open */
-			if (!pd || !pd->port_cnt)
-				continue;
-			/* Skip port if it doesn't match the requested one */
-			if (pd->port_subport_id != uinfo->spu_subport_id)
-				continue;
-			/* Verify the sharing process matches the master */
-			if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
-			    pd->userversion != uinfo->spu_userversion ||
-			    pd->port_cnt >= pd->port_subport_cnt) {
-				ret = -EINVAL;
-				goto done;
-			}
-			port_fp(fp) = pd;
-			subport_fp(fp) = pd->port_cnt++;
-			pd->port_subpid[subport_fp(fp)] =
-				get_pid(task_pid(current));
-			tidcursor_fp(fp) = 0;
-			pd->active_slaves |= 1 << subport_fp(fp);
-			ipath_cdbg(PROC,
-				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
-				   current->comm, current->pid,
-				   subport_fp(fp),
-				   pd->port_comm, pid_nr(pd->port_pid),
-				   dd->ipath_unit, pd->port_port);
-			ret = 1;
-			goto done;
-		}
-	}
-
-done:
-	return ret;
-}
-
-static int ipath_open(struct inode *in, struct file *fp)
-{
-	/* The real work is performed later in ipath_assign_port() */
-	fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
-	return fp->private_data ? 0 : -ENOMEM;
-}
-
-/* Get port early, so can set affinity prior to memory allocation */
-static int ipath_assign_port(struct file *fp,
-			      const struct ipath_user_info *uinfo)
-{
-	int ret;
-	int i_minor;
-	unsigned swmajor, swminor;
-
-	/* Check to be sure we haven't already initialized this file */
-	if (port_fp(fp)) {
-		ret = -EINVAL;
-		goto done;
-	}
-
-	/* for now, if major version is different, bail */
-	swmajor = uinfo->spu_userversion >> 16;
-	if (swmajor != IPATH_USER_SWMAJOR) {
-		ipath_dbg("User major version %d not same as driver "
-			  "major %d\n", uinfo->spu_userversion >> 16,
-			  IPATH_USER_SWMAJOR);
-		ret = -ENODEV;
-		goto done;
-	}
-
-	swminor = uinfo->spu_userversion & 0xffff;
-	if (swminor != IPATH_USER_SWMINOR)
-		ipath_dbg("User minor version %d not same as driver "
-			  "minor %d\n", swminor, IPATH_USER_SWMINOR);
-
-	mutex_lock(&ipath_mutex);
-
-	if (ipath_compatible_subports(swmajor, swminor) &&
-	    uinfo->spu_subport_cnt &&
-	    (ret = find_shared_port(fp, uinfo))) {
-		if (ret > 0)
-			ret = 0;
-		goto done_chk_sdma;
-	}
-
-	i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
-	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
-		   (long)file_inode(fp)->i_rdev, i_minor);
-
-	if (i_minor)
-		ret = find_free_port(i_minor - 1, fp, uinfo);
-	else
-		ret = find_best_unit(fp, uinfo);
-
-done_chk_sdma:
-	if (!ret) {
-		struct ipath_filedata *fd = fp->private_data;
-		const struct ipath_portdata *pd = fd->pd;
-		const struct ipath_devdata *dd = pd->port_dd;
-
-		fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
-						      dd->ipath_unit,
-						      pd->port_port,
-						      fd->subport);
-
-		if (!fd->pq)
-			ret = -ENOMEM;
-	}
-
-	mutex_unlock(&ipath_mutex);
-
-done:
-	return ret;
-}
-
-
-static int ipath_do_user_init(struct file *fp,
-			      const struct ipath_user_info *uinfo)
-{
-	int ret;
-	struct ipath_portdata *pd = port_fp(fp);
-	struct ipath_devdata *dd;
-	u32 head32;
-
-	/* Subports don't need to initialize anything since master did it. */
-	if (subport_fp(fp)) {
-		ret = wait_event_interruptible(pd->port_wait,
-			!test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
-		goto done;
-	}
-
-	dd = pd->port_dd;
-
-	if (uinfo->spu_rcvhdrsize) {
-		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
-		if (ret)
-			goto done;
-	}
-
-	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
-
-	/* some ports may get extra buffers, calculate that here */
-	if (pd->port_port <= dd->ipath_ports_extrabuf)
-		pd->port_piocnt = dd->ipath_pbufsport + 1;
-	else
-		pd->port_piocnt = dd->ipath_pbufsport;
-
-	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
-	if (pd->port_port <= dd->ipath_ports_extrabuf)
-		pd->port_pio_base = (dd->ipath_pbufsport + 1)
-			* (pd->port_port - 1);
-	else
-		pd->port_pio_base = dd->ipath_ports_extrabuf +
-			dd->ipath_pbufsport * (pd->port_port - 1);
-	pd->port_piobufs = dd->ipath_piobufbase +
-		pd->port_pio_base * dd->ipath_palign;
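-	/*
-	 * Worked example (made-up numbers): with ipath_pbufsport == 64
-	 * and ipath_ports_extrabuf == 2, port 1 gets 65 buffers starting
-	 * at pio index 0, port 2 gets 65 starting at 65, and port 3 gets
-	 * 64 starting at 2 + 64 * 2 = 130.
-	 */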
-	ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
-		" first pio %u\n", pd->port_port, pd->port_piobufs,
-		pd->port_piocnt, pd->port_pio_base);
-	ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
-
-	/*
-	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
-	 * array for the time being.  If pd->port_port exceeds what the
-	 * chip supports, we will someday need extra handling here to
-	 * route the overflow through port 0.
-	 */
-	ret = ipath_create_rcvhdrq(dd, pd);
-	if (!ret)
-		ret = ipath_create_user_egr(pd);
-	if (ret)
-		goto done;
-
-	/*
-	 * set the eager head register for this port to the current values
-	 * of the tail pointers, since we don't know if they were
-	 * updated on last use of the port.
-	 */
-	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
-	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
-	pd->port_lastrcvhdrqtail = -1;
-	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
-		pd->port_port, head32);
-	pd->port_tidcursor = 0;	/* start at beginning after open */
-
-	/* initialize poll variables... */
-	pd->port_urgent = 0;
-	pd->port_urgent_poll = 0;
-	pd->port_hdrqfull_poll = pd->port_hdrqfull;
-
-	/*
-	 * Now enable the port for receive.
-	 * Some chips DMA the tail register to memory whenever it
-	 * changes (and when the update bit transitions from 0 to 1),
-	 * so for those chips we turn tail updates off and then back on.
-	 * This will (very briefly) affect any other open ports, but the
-	 * duration is very short, and therefore isn't an issue.  We
-	 * explicitly set the in-memory tail copy to 0 beforehand, so we
-	 * don't have to wait to be sure the DMA update has happened
-	 * (chip resets head/tail to 0 on transition to enable).
-	 */
-	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
-		&dd->ipath_rcvctrl);
-	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-		if (pd->port_rcvhdrtail_kvaddr)
-			ipath_clear_rcvhdrtail(pd);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-			dd->ipath_rcvctrl &
-			~(1ULL << dd->ipath_r_tailupd_shift));
-	}
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-			 dd->ipath_rcvctrl);
-	/* Notify any waiting slaves */
-	if (pd->port_subport_cnt) {
-		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-		wake_up(&pd->port_wait);
-	}
-done:
-	return ret;
-}
-
-/**
 * unlock_expected_tids - unlock any expected TID entries the port still had in use
- * @pd: port
- *
- * We don't actually update the chip here, because we do a bulk update
- * below, using ipath_f_clear_tids.
- */
-static void unlock_expected_tids(struct ipath_portdata *pd)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
-	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-
-	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
-		   pd->port_port);
-	for (i = port_tidbase; i < maxtid; i++) {
-		struct page *ps = dd->ipath_pageshadow[i];
-
-		if (!ps)
-			continue;
-
-		dd->ipath_pageshadow[i] = NULL;
-		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
-			PAGE_SIZE, PCI_DMA_FROMDEVICE);
-		ipath_release_user_pages_on_close(&ps, 1);
-		cnt++;
-		ipath_stats.sps_pageunlocks++;
-	}
-	if (cnt)
-		ipath_cdbg(VERBOSE, "Port %u unlocked %u expTID entries\n",
-			   pd->port_port, cnt);
-
-	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
-		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
-			   (unsigned long long) ipath_stats.sps_pagelocks,
-			   (unsigned long long)
-			   ipath_stats.sps_pageunlocks);
-}
-
-static int ipath_close(struct inode *in, struct file *fp)
-{
-	struct ipath_filedata *fd;
-	struct ipath_portdata *pd;
-	struct ipath_devdata *dd;
-	unsigned long flags;
-	unsigned port;
-	struct pid *pid;
-
-	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
-		   (long)in->i_rdev, fp->private_data);
-
-	mutex_lock(&ipath_mutex);
-
-	fd = fp->private_data;
-	fp->private_data = NULL;
-	pd = fd->pd;
-	if (!pd) {
-		mutex_unlock(&ipath_mutex);
-		goto bail;
-	}
-
-	dd = pd->port_dd;
-
-	/* drain user sdma queue */
-	ipath_user_sdma_queue_drain(dd, fd->pq);
-	ipath_user_sdma_queue_destroy(fd->pq);
-
-	if (--pd->port_cnt) {
-		/*
-		 * XXX If the master closes the port before the slave(s),
-		 * revoke the mmap for the eager receive queue so
-		 * the slave(s) don't wait for receive data forever.
-		 */
-		pd->active_slaves &= ~(1 << fd->subport);
-		put_pid(pd->port_subpid[fd->subport]);
-		pd->port_subpid[fd->subport] = NULL;
-		mutex_unlock(&ipath_mutex);
-		goto bail;
-	}
-	/* early; no interrupt users after this */
-	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-	port = pd->port_port;
-	dd->ipath_pd[port] = NULL;
-	pid = pd->port_pid;
-	pd->port_pid = NULL;
-	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
-	if (pd->port_rcvwait_to || pd->port_piowait_to
-	    || pd->port_rcvnowait || pd->port_pionowait) {
-		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
-			   "%u rcv, %u pio already\n",
-			   pd->port_port, pd->port_rcvwait_to,
-			   pd->port_piowait_to, pd->port_rcvnowait,
-			   pd->port_pionowait);
-		pd->port_rcvwait_to = pd->port_piowait_to =
-			pd->port_rcvnowait = pd->port_pionowait = 0;
-	}
-	if (pd->port_flag) {
-		ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
-			  pd->port_port, pd->port_flag);
-		pd->port_flag = 0;
-	}
-
-	if (dd->ipath_kregbase) {
-		/* atomically clear receive enable port and intr avail. */
-		clear_bit(dd->ipath_r_portenable_shift + port,
-			  &dd->ipath_rcvctrl);
-		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
-			  &dd->ipath_rcvctrl);
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
-			dd->ipath_rcvctrl);
-		/* and read back from chip to be sure that nothing
-		 * else is in flight when we do the rest */
-		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-		/* clean up the pkeys for this port user */
-		ipath_clean_part_key(pd, dd);
-		/*
-		 * be paranoid, and never write 0's to these, just use an
-		 * unused part of the port 0 tail page.  Of course,
-		 * rcvhdraddr points to a large chunk of memory, so this
-		 * could still trash things, but at least it won't trash
-		 * page 0, and by disabling the port, it should stop "soon",
-		 * even if a packet or two is already in flight after we
-		 * disabled the port.
-		 */
-		ipath_write_kreg_port(dd,
-			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
-			dd->ipath_dummy_hdrq_phys);
-		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
-			pd->port_port, dd->ipath_dummy_hdrq_phys);
-
-		ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
-		ipath_chg_pioavailkernel(dd, pd->port_pio_base,
-			pd->port_piocnt, 1);
-
-		dd->ipath_f_clear_tids(dd, pd->port_port);
-
-		if (dd->ipath_pageshadow)
-			unlock_expected_tids(pd);
-		ipath_stats.sps_ports--;
-		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-			   pd->port_comm, pid_nr(pid),
-			   dd->ipath_unit, port);
-	}
-
-	put_pid(pid);
-	mutex_unlock(&ipath_mutex);
-	ipath_free_pddata(dd, pd); /* after releasing the mutex */
-
-bail:
-	kfree(fd);
-	return 0;
-}
-
-static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
-			   struct ipath_port_info __user *uinfo)
-{
-	struct ipath_port_info info;
-	int nup;
-	int ret;
-	size_t sz;
-
-	(void) ipath_count_units(NULL, &nup, NULL);
-	info.num_active = nup;
-	info.unit = pd->port_dd->ipath_unit;
-	info.port = pd->port_port;
-	info.subport = subport;
-	/* Don't return new fields if old library opened the port. */
-	if (ipath_supports_subports(pd->userversion >> 16,
-				    pd->userversion & 0xffff)) {
-		/* Number of user ports available for this device. */
-		info.num_ports = pd->port_dd->ipath_cfgports - 1;
-		info.num_subports = pd->port_subport_cnt;
-		sz = sizeof(info);
-	} else
-		sz = sizeof(info) - 2 * sizeof(u16);
-
-	if (copy_to_user(uinfo, &info, sz)) {
-		ret = -EFAULT;
-		goto bail;
-	}
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-static int ipath_get_slave_info(struct ipath_portdata *pd,
-				void __user *slave_mask_addr)
-{
-	int ret = 0;
-
-	if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
-		ret = -EFAULT;
-	return ret;
-}
-
-static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
-				   u32 __user *inflightp)
-{
-	const u32 val = ipath_user_sdma_inflight_counter(pq);
-
-	if (put_user(val, inflightp))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int ipath_sdma_get_complete(struct ipath_devdata *dd,
-				   struct ipath_user_sdma_queue *pq,
-				   u32 __user *completep)
-{
-	u32 val;
-	int err;
-
-	err = ipath_user_sdma_make_progress(dd, pq);
-	if (err < 0)
-		return err;
-
-	val = ipath_user_sdma_complete_counter(pq);
-	if (put_user(val, completep))
-		return -EFAULT;
-
-	return 0;
-}
-
-static ssize_t ipath_write(struct file *fp, const char __user *data,
-			   size_t count, loff_t *off)
-{
-	const struct ipath_cmd __user *ucmd;
-	struct ipath_portdata *pd;
-	const void __user *src;
-	size_t consumed, copy;
-	struct ipath_cmd cmd;
-	ssize_t ret = 0;
-	void *dest;
-
-	if (count < sizeof(cmd.type)) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	ucmd = (const struct ipath_cmd __user *) data;
-
-	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
-		ret = -EFAULT;
-		goto bail;
-	}
-
-	consumed = sizeof(cmd.type);
-
-	switch (cmd.type) {
-	case IPATH_CMD_ASSIGN_PORT:
-	case __IPATH_CMD_USER_INIT:
-	case IPATH_CMD_USER_INIT:
-		copy = sizeof(cmd.cmd.user_info);
-		dest = &cmd.cmd.user_info;
-		src = &ucmd->cmd.user_info;
-		break;
-	case IPATH_CMD_RECV_CTRL:
-		copy = sizeof(cmd.cmd.recv_ctrl);
-		dest = &cmd.cmd.recv_ctrl;
-		src = &ucmd->cmd.recv_ctrl;
-		break;
-	case IPATH_CMD_PORT_INFO:
-		copy = sizeof(cmd.cmd.port_info);
-		dest = &cmd.cmd.port_info;
-		src = &ucmd->cmd.port_info;
-		break;
-	case IPATH_CMD_TID_UPDATE:
-	case IPATH_CMD_TID_FREE:
-		copy = sizeof(cmd.cmd.tid_info);
-		dest = &cmd.cmd.tid_info;
-		src = &ucmd->cmd.tid_info;
-		break;
-	case IPATH_CMD_SET_PART_KEY:
-		copy = sizeof(cmd.cmd.part_key);
-		dest = &cmd.cmd.part_key;
-		src = &ucmd->cmd.part_key;
-		break;
-	case __IPATH_CMD_SLAVE_INFO:
-		copy = sizeof(cmd.cmd.slave_mask_addr);
-		dest = &cmd.cmd.slave_mask_addr;
-		src = &ucmd->cmd.slave_mask_addr;
-		break;
-	case IPATH_CMD_PIOAVAILUPD:	/* force an update of PIOAvail reg */
-		copy = 0;
-		src = NULL;
-		dest = NULL;
-		break;
-	case IPATH_CMD_POLL_TYPE:
-		copy = sizeof(cmd.cmd.poll_type);
-		dest = &cmd.cmd.poll_type;
-		src = &ucmd->cmd.poll_type;
-		break;
-	case IPATH_CMD_ARMLAUNCH_CTRL:
-		copy = sizeof(cmd.cmd.armlaunch_ctrl);
-		dest = &cmd.cmd.armlaunch_ctrl;
-		src = &ucmd->cmd.armlaunch_ctrl;
-		break;
-	case IPATH_CMD_SDMA_INFLIGHT:
-		copy = sizeof(cmd.cmd.sdma_inflight);
-		dest = &cmd.cmd.sdma_inflight;
-		src = &ucmd->cmd.sdma_inflight;
-		break;
-	case IPATH_CMD_SDMA_COMPLETE:
-		copy = sizeof(cmd.cmd.sdma_complete);
-		dest = &cmd.cmd.sdma_complete;
-		src = &ucmd->cmd.sdma_complete;
-		break;
-	default:
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	if (copy) {
-		if ((count - consumed) < copy) {
-			ret = -EINVAL;
-			goto bail;
-		}
-
-		if (copy_from_user(dest, src, copy)) {
-			ret = -EFAULT;
-			goto bail;
-		}
-
-		consumed += copy;
-	}
-
-	pd = port_fp(fp);
-	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
-		cmd.type != IPATH_CMD_ASSIGN_PORT) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	switch (cmd.type) {
-	case IPATH_CMD_ASSIGN_PORT:
-		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
-		if (ret)
-			goto bail;
-		break;
-	case __IPATH_CMD_USER_INIT:
-		/* backwards compatibility, get port first */
-		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
-		if (ret)
-			goto bail;
-		/* and fall through to current version. */
-	case IPATH_CMD_USER_INIT:
-		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
-		if (ret)
-			goto bail;
-		ret = ipath_get_base_info(
-			fp, (void __user *) (unsigned long)
-			cmd.cmd.user_info.spu_base_info,
-			cmd.cmd.user_info.spu_base_info_size);
-		break;
-	case IPATH_CMD_RECV_CTRL:
-		ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
-		break;
-	case IPATH_CMD_PORT_INFO:
-		ret = ipath_port_info(pd, subport_fp(fp),
-				      (struct ipath_port_info __user *)
-				      (unsigned long) cmd.cmd.port_info);
-		break;
-	case IPATH_CMD_TID_UPDATE:
-		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
-		break;
-	case IPATH_CMD_TID_FREE:
-		ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
-		break;
-	case IPATH_CMD_SET_PART_KEY:
-		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
-		break;
-	case __IPATH_CMD_SLAVE_INFO:
-		ret = ipath_get_slave_info(pd,
-					   (void __user *) (unsigned long)
-					   cmd.cmd.slave_mask_addr);
-		break;
-	case IPATH_CMD_PIOAVAILUPD:
-		ipath_force_pio_avail_update(pd->port_dd);
-		break;
-	case IPATH_CMD_POLL_TYPE:
-		pd->poll_type = cmd.cmd.poll_type;
-		break;
-	case IPATH_CMD_ARMLAUNCH_CTRL:
-		if (cmd.cmd.armlaunch_ctrl)
-			ipath_enable_armlaunch(pd->port_dd);
-		else
-			ipath_disable_armlaunch(pd->port_dd);
-		break;
-	case IPATH_CMD_SDMA_INFLIGHT:
-		ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
-					      (u32 __user *) (unsigned long)
-					      cmd.cmd.sdma_inflight);
-		break;
-	case IPATH_CMD_SDMA_COMPLETE:
-		ret = ipath_sdma_get_complete(pd->port_dd,
-					      user_sdma_queue_fp(fp),
-					      (u32 __user *) (unsigned long)
-					      cmd.cmd.sdma_complete);
-		break;
-	}
-
-	if (ret >= 0)
-		ret = consumed;
-
-bail:
-	return ret;
-}
-
-static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
-	struct file *filp = iocb->ki_filp;
-	struct ipath_filedata *fp = filp->private_data;
-	struct ipath_portdata *pd = port_fp(filp);
-	struct ipath_user_sdma_queue *pq = fp->pq;
-
-	if (!iter_is_iovec(from) || !from->nr_segs)
-		return -EINVAL;
-
-	return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
-}
-
-static struct class *ipath_class;
-
-static int init_cdev(int minor, char *name, const struct file_operations *fops,
-		     struct cdev **cdevp, struct device **devp)
-{
-	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
-	struct cdev *cdev = NULL;
-	struct device *device = NULL;
-	int ret;
-
-	cdev = cdev_alloc();
-	if (!cdev) {
-		printk(KERN_ERR IPATH_DRV_NAME
-		       ": Could not allocate cdev for minor %d, %s\n",
-		       minor, name);
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	cdev->owner = THIS_MODULE;
-	cdev->ops = fops;
-	kobject_set_name(&cdev->kobj, name);
-
-	ret = cdev_add(cdev, dev, 1);
-	if (ret < 0) {
-		printk(KERN_ERR IPATH_DRV_NAME
-		       ": Could not add cdev for minor %d, %s (err %d)\n",
-		       minor, name, -ret);
-		goto err_cdev;
-	}
-
-	device = device_create(ipath_class, NULL, dev, NULL, name);
-
-	if (IS_ERR(device)) {
-		ret = PTR_ERR(device);
-		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
-		       "device for minor %d, %s (err %d)\n",
-		       minor, name, -ret);
-		goto err_cdev;
-	}
-
-	goto done;
-
-err_cdev:
-	cdev_del(cdev);
-	cdev = NULL;
-
-done:
-	if (ret >= 0) {
-		*cdevp = cdev;
-		*devp = device;
-	} else {
-		*cdevp = NULL;
-		*devp = NULL;
-	}
-
-	return ret;
-}
-
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
-		    struct cdev **cdevp, struct device **devp)
-{
-	return init_cdev(minor, name, fops, cdevp, devp);
-}
-
-static void cleanup_cdev(struct cdev **cdevp,
-			 struct device **devp)
-{
-	struct device *dev = *devp;
-
-	if (dev) {
-		device_unregister(dev);
-		*devp = NULL;
-	}
-
-	if (*cdevp) {
-		cdev_del(*cdevp);
-		*cdevp = NULL;
-	}
-}
-
-void ipath_cdev_cleanup(struct cdev **cdevp,
-			struct device **devp)
-{
-	cleanup_cdev(cdevp, devp);
-}
-
-static struct cdev *wildcard_cdev;
-static struct device *wildcard_dev;
-
-static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
-
-static int user_init(void)
-{
-	int ret;
-
-	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
-	if (ret < 0) {
-		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
-		       "chrdev region (err %d)\n", -ret);
-		goto done;
-	}
-
-	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);
-
-	if (IS_ERR(ipath_class)) {
-		ret = PTR_ERR(ipath_class);
-		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
-		       "device class (err %d)\n", -ret);
-		goto bail;
-	}
-
-	goto done;
-bail:
-	unregister_chrdev_region(dev, IPATH_NMINORS);
-done:
-	return ret;
-}
-
-static void user_cleanup(void)
-{
-	if (ipath_class) {
-		class_destroy(ipath_class);
-		ipath_class = NULL;
-	}
-
-	unregister_chrdev_region(dev, IPATH_NMINORS);
-}
-
-static atomic_t user_count = ATOMIC_INIT(0);
-static atomic_t user_setup = ATOMIC_INIT(0);
-
-int ipath_user_add(struct ipath_devdata *dd)
-{
-	char name[10];
-	int ret;
-
-	if (atomic_inc_return(&user_count) == 1) {
-		ret = user_init();
-		if (ret < 0) {
-			ipath_dev_err(dd, "Unable to set up user support: "
-				      "error %d\n", -ret);
-			goto bail;
-		}
-		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
-				&wildcard_dev);
-		if (ret < 0) {
-			ipath_dev_err(dd, "Could not create wildcard "
-				      "minor: error %d\n", -ret);
-			goto bail_user;
-		}
-
-		atomic_set(&user_setup, 1);
-	}
-
-	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
-
-	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
-			&dd->user_cdev, &dd->user_dev);
-	if (ret < 0)
-		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
-			      dd->ipath_unit + 1, name);
-
-	goto bail;
-
-bail_user:
-	user_cleanup();
-bail:
-	return ret;
-}
-
-void ipath_user_remove(struct ipath_devdata *dd)
-{
-	cleanup_cdev(&dd->user_cdev, &dd->user_dev);
-
-	if (atomic_dec_return(&user_count) == 0) {
-		if (atomic_read(&user_setup) == 0)
-			goto bail;
-
-		cleanup_cdev(&wildcard_cdev, &wildcard_dev);
-		user_cleanup();
-
-		atomic_set(&user_setup, 0);
-	}
-bail:
-	return;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_fs.c b/drivers/staging/rdma/ipath/ipath_fs.c
deleted file mode 100644
index 476fcdf..0000000
--- a/drivers/staging/rdma/ipath/ipath_fs.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-#define IPATHFS_MAGIC 0x726a77
-
-static struct super_block *ipath_super;
-
-static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
-			 umode_t mode, const struct file_operations *fops,
-			 void *data)
-{
-	int error;
-	struct inode *inode = new_inode(dir->i_sb);
-
-	if (!inode) {
-		error = -EPERM;
-		goto bail;
-	}
-
-	inode->i_ino = get_next_ino();
-	inode->i_mode = mode;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	inode->i_private = data;
-	if (S_ISDIR(mode)) {
-		inode->i_op = &simple_dir_inode_operations;
-		inc_nlink(inode);
-		inc_nlink(dir);
-	}
-
-	inode->i_fop = fops;
-
-	d_instantiate(dentry, inode);
-	error = 0;
-
-bail:
-	return error;
-}
-
-static int create_file(const char *name, umode_t mode,
-		       struct dentry *parent, struct dentry **dentry,
-		       const struct file_operations *fops, void *data)
-{
-	int error;
-
-	inode_lock(d_inode(parent));
-	*dentry = lookup_one_len(name, parent, strlen(name));
-	if (!IS_ERR(*dentry))
-		error = ipathfs_mknod(d_inode(parent), *dentry,
-				      mode, fops, data);
-	else
-		error = PTR_ERR(*dentry);
-	inode_unlock(d_inode(parent));
-
-	return error;
-}
-
-static ssize_t atomic_stats_read(struct file *file, char __user *buf,
-				 size_t count, loff_t *ppos)
-{
-	return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
-				       sizeof ipath_stats);
-}
-
-static const struct file_operations atomic_stats_ops = {
-	.read = atomic_stats_read,
-	.llseek = default_llseek,
-};
-
-static ssize_t atomic_counters_read(struct file *file, char __user *buf,
-				    size_t count, loff_t *ppos)
-{
-	struct infinipath_counters counters;
-	struct ipath_devdata *dd;
-
-	dd = file_inode(file)->i_private;
-	dd->ipath_f_read_counters(dd, &counters);
-
-	return simple_read_from_buffer(buf, count, ppos, &counters,
-				       sizeof counters);
-}
-
-static const struct file_operations atomic_counters_ops = {
-	.read = atomic_counters_read,
-	.llseek = default_llseek,
-};
-
-static ssize_t flash_read(struct file *file, char __user *buf,
-			  size_t count, loff_t *ppos)
-{
-	struct ipath_devdata *dd;
-	ssize_t ret;
-	loff_t pos;
-	char *tmp;
-
-	pos = *ppos;
-
-	if (pos < 0) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	if (pos >= sizeof(struct ipath_flash)) {
-		ret = 0;
-		goto bail;
-	}
-
-	if (count > sizeof(struct ipath_flash) - pos)
-		count = sizeof(struct ipath_flash) - pos;
-
-	tmp = kmalloc(count, GFP_KERNEL);
-	if (!tmp) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	dd = file_inode(file)->i_private;
-	if (ipath_eeprom_read(dd, pos, tmp, count)) {
-		ipath_dev_err(dd, "failed to read from flash\n");
-		ret = -ENXIO;
-		goto bail_tmp;
-	}
-
-	if (copy_to_user(buf, tmp, count)) {
-		ret = -EFAULT;
-		goto bail_tmp;
-	}
-
-	*ppos = pos + count;
-	ret = count;
-
-bail_tmp:
-	kfree(tmp);
-
-bail:
-	return ret;
-}
-
-static ssize_t flash_write(struct file *file, const char __user *buf,
-			   size_t count, loff_t *ppos)
-{
-	struct ipath_devdata *dd;
-	ssize_t ret;
-	loff_t pos;
-	char *tmp;
-
-	pos = *ppos;
-
-	if (pos != 0) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	if (count != sizeof(struct ipath_flash)) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	tmp = memdup_user(buf, count);
-	if (IS_ERR(tmp))
-		return PTR_ERR(tmp);
-
-	dd = file_inode(file)->i_private;
-	if (ipath_eeprom_write(dd, pos, tmp, count)) {
-		ret = -ENXIO;
-		ipath_dev_err(dd, "failed to write to flash\n");
-		goto bail_tmp;
-	}
-
-	*ppos = pos + count;
-	ret = count;
-
-bail_tmp:
-	kfree(tmp);
-
-bail:
-	return ret;
-}
-
-static const struct file_operations flash_ops = {
-	.read = flash_read,
-	.write = flash_write,
-	.llseek = default_llseek,
-};
-
-static int create_device_files(struct super_block *sb,
-			       struct ipath_devdata *dd)
-{
-	struct dentry *dir, *tmp;
-	char unit[10];
-	int ret;
-
-	snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
-	ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
-			  &simple_dir_operations, dd);
-	if (ret) {
-		printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
-		goto bail;
-	}
-
-	ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
-			  &atomic_counters_ops, dd);
-	if (ret) {
-		printk(KERN_ERR "create_file(%s/atomic_counters) "
-		       "failed: %d\n", unit, ret);
-		goto bail;
-	}
-
-	ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
-			  &flash_ops, dd);
-	if (ret) {
-		printk(KERN_ERR "create_file(%s/flash) "
-		       "failed: %d\n", unit, ret);
-		goto bail;
-	}
-
-bail:
-	return ret;
-}
-
-static int remove_file(struct dentry *parent, char *name)
-{
-	struct dentry *tmp;
-	int ret;
-
-	tmp = lookup_one_len(name, parent, strlen(name));
-
-	if (IS_ERR(tmp)) {
-		ret = PTR_ERR(tmp);
-		goto bail;
-	}
-
-	spin_lock(&tmp->d_lock);
-	if (simple_positive(tmp)) {
-		dget_dlock(tmp);
-		__d_drop(tmp);
-		spin_unlock(&tmp->d_lock);
-		simple_unlink(d_inode(parent), tmp);
-	} else
-		spin_unlock(&tmp->d_lock);
-
-	ret = 0;
-bail:
-	/*
-	 * We don't expect clients to care about the return value, but
-	 * it's there if they need it.
-	 */
-	return ret;
-}
-
-static int remove_device_files(struct super_block *sb,
-			       struct ipath_devdata *dd)
-{
-	struct dentry *dir, *root;
-	char unit[10];
-	int ret;
-
-	root = dget(sb->s_root);
-	inode_lock(d_inode(root));
-	snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
-	dir = lookup_one_len(unit, root, strlen(unit));
-
-	if (IS_ERR(dir)) {
-		ret = PTR_ERR(dir);
-		printk(KERN_ERR "Lookup of %s failed\n", unit);
-		goto bail;
-	}
-
-	remove_file(dir, "flash");
-	remove_file(dir, "atomic_counters");
-	d_delete(dir);
-	ret = simple_rmdir(d_inode(root), dir);
-
-bail:
-	inode_unlock(d_inode(root));
-	dput(root);
-	return ret;
-}
-
-static int ipathfs_fill_super(struct super_block *sb, void *data,
-			      int silent)
-{
-	struct ipath_devdata *dd, *tmp;
-	unsigned long flags;
-	int ret;
-
-	static struct tree_descr files[] = {
-		[2] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
-		{""},
-	};
-
-	ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
-	if (ret) {
-		printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
-		goto bail;
-	}
-
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-		spin_unlock_irqrestore(&ipath_devs_lock, flags);
-		ret = create_device_files(sb, dd);
-		if (ret)
-			goto bail;
-		spin_lock_irqsave(&ipath_devs_lock, flags);
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-bail:
-	return ret;
-}
-
-static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
-			int flags, const char *dev_name, void *data)
-{
-	struct dentry *ret;
-	ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
-	if (!IS_ERR(ret))
-		ipath_super = ret->d_sb;
-	return ret;
-}
-
-static void ipathfs_kill_super(struct super_block *s)
-{
-	kill_litter_super(s);
-	ipath_super = NULL;
-}
-
-int ipathfs_add_device(struct ipath_devdata *dd)
-{
-	int ret;
-
-	if (ipath_super == NULL) {
-		ret = 0;
-		goto bail;
-	}
-
-	ret = create_device_files(ipath_super, dd);
-
-bail:
-	return ret;
-}
-
-int ipathfs_remove_device(struct ipath_devdata *dd)
-{
-	int ret;
-
-	if (ipath_super == NULL) {
-		ret = 0;
-		goto bail;
-	}
-
-	ret = remove_device_files(ipath_super, dd);
-
-bail:
-	return ret;
-}
-
-static struct file_system_type ipathfs_fs_type = {
-	.owner =	THIS_MODULE,
-	.name =		"ipathfs",
-	.mount =	ipathfs_mount,
-	.kill_sb =	ipathfs_kill_super,
-};
-MODULE_ALIAS_FS("ipathfs");
-
-int __init ipath_init_ipathfs(void)
-{
-	return register_filesystem(&ipathfs_fs_type);
-}
-
-void __exit ipath_exit_ipathfs(void)
-{
-	unregister_filesystem(&ipathfs_fs_type);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_iba6110.c b/drivers/staging/rdma/ipath/ipath_iba6110.c
deleted file mode 100644
index 5f13572..0000000
--- a/drivers/staging/rdma/ipath/ipath_iba6110.c
+++ /dev/null
@@ -1,1939 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the InfiniPath
- * HT chip.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/htirq.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64);
-
-
-/*
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- *
- * The names are in InterCap form because they're taken straight from
- * the chip specification.  Since they're only used in this file, they
- * don't pollute the rest of the source.
- */
-
-struct _infinipath_do_not_use_kernel_regs {
-	unsigned long long Revision;
-	unsigned long long Control;
-	unsigned long long PageAlign;
-	unsigned long long PortCnt;
-	unsigned long long DebugPortSelect;
-	unsigned long long DebugPort;
-	unsigned long long SendRegBase;
-	unsigned long long UserRegBase;
-	unsigned long long CounterRegBase;
-	unsigned long long Scratch;
-	unsigned long long ReservedMisc1;
-	unsigned long long InterruptConfig;
-	unsigned long long IntBlocked;
-	unsigned long long IntMask;
-	unsigned long long IntStatus;
-	unsigned long long IntClear;
-	unsigned long long ErrorMask;
-	unsigned long long ErrorStatus;
-	unsigned long long ErrorClear;
-	unsigned long long HwErrMask;
-	unsigned long long HwErrStatus;
-	unsigned long long HwErrClear;
-	unsigned long long HwDiagCtrl;
-	unsigned long long MDIO;
-	unsigned long long IBCStatus;
-	unsigned long long IBCCtrl;
-	unsigned long long ExtStatus;
-	unsigned long long ExtCtrl;
-	unsigned long long GPIOOut;
-	unsigned long long GPIOMask;
-	unsigned long long GPIOStatus;
-	unsigned long long GPIOClear;
-	unsigned long long RcvCtrl;
-	unsigned long long RcvBTHQP;
-	unsigned long long RcvHdrSize;
-	unsigned long long RcvHdrCnt;
-	unsigned long long RcvHdrEntSize;
-	unsigned long long RcvTIDBase;
-	unsigned long long RcvTIDCnt;
-	unsigned long long RcvEgrBase;
-	unsigned long long RcvEgrCnt;
-	unsigned long long RcvBufBase;
-	unsigned long long RcvBufSize;
-	unsigned long long RxIntMemBase;
-	unsigned long long RxIntMemSize;
-	unsigned long long RcvPartitionKey;
-	unsigned long long ReservedRcv[10];
-	unsigned long long SendCtrl;
-	unsigned long long SendPIOBufBase;
-	unsigned long long SendPIOSize;
-	unsigned long long SendPIOBufCnt;
-	unsigned long long SendPIOAvailAddr;
-	unsigned long long TxIntMemBase;
-	unsigned long long TxIntMemSize;
-	unsigned long long ReservedSend[9];
-	unsigned long long SendBufferError;
-	unsigned long long SendBufferErrorCONT1;
-	unsigned long long SendBufferErrorCONT2;
-	unsigned long long SendBufferErrorCONT3;
-	unsigned long long ReservedSBE[4];
-	unsigned long long RcvHdrAddr0;
-	unsigned long long RcvHdrAddr1;
-	unsigned long long RcvHdrAddr2;
-	unsigned long long RcvHdrAddr3;
-	unsigned long long RcvHdrAddr4;
-	unsigned long long RcvHdrAddr5;
-	unsigned long long RcvHdrAddr6;
-	unsigned long long RcvHdrAddr7;
-	unsigned long long RcvHdrAddr8;
-	unsigned long long ReservedRHA[7];
-	unsigned long long RcvHdrTailAddr0;
-	unsigned long long RcvHdrTailAddr1;
-	unsigned long long RcvHdrTailAddr2;
-	unsigned long long RcvHdrTailAddr3;
-	unsigned long long RcvHdrTailAddr4;
-	unsigned long long RcvHdrTailAddr5;
-	unsigned long long RcvHdrTailAddr6;
-	unsigned long long RcvHdrTailAddr7;
-	unsigned long long RcvHdrTailAddr8;
-	unsigned long long ReservedRHTA[7];
-	unsigned long long Sync;	/* Software only */
-	unsigned long long Dump;	/* Software only */
-	unsigned long long SimVer;	/* Software only */
-	unsigned long long ReservedSW[5];
-	unsigned long long SerdesConfig0;
-	unsigned long long SerdesConfig1;
-	unsigned long long SerdesStatus;
-	unsigned long long XGXSConfig;
-	unsigned long long ReservedSW2[4];
-};
-
-struct _infinipath_do_not_use_counters {
-	__u64 LBIntCnt;
-	__u64 LBFlowStallCnt;
-	__u64 Reserved1;
-	__u64 TxUnsupVLErrCnt;
-	__u64 TxDataPktCnt;
-	__u64 TxFlowPktCnt;
-	__u64 TxDwordCnt;
-	__u64 TxLenErrCnt;
-	__u64 TxMaxMinLenErrCnt;
-	__u64 TxUnderrunCnt;
-	__u64 TxFlowStallCnt;
-	__u64 TxDroppedPktCnt;
-	__u64 RxDroppedPktCnt;
-	__u64 RxDataPktCnt;
-	__u64 RxFlowPktCnt;
-	__u64 RxDwordCnt;
-	__u64 RxLenErrCnt;
-	__u64 RxMaxMinLenErrCnt;
-	__u64 RxICRCErrCnt;
-	__u64 RxVCRCErrCnt;
-	__u64 RxFlowCtrlErrCnt;
-	__u64 RxBadFormatCnt;
-	__u64 RxLinkProblemCnt;
-	__u64 RxEBPCnt;
-	__u64 RxLPCRCErrCnt;
-	__u64 RxBufOvflCnt;
-	__u64 RxTIDFullErrCnt;
-	__u64 RxTIDValidErrCnt;
-	__u64 RxPKeyMismatchCnt;
-	__u64 RxP0HdrEgrOvflCnt;
-	__u64 RxP1HdrEgrOvflCnt;
-	__u64 RxP2HdrEgrOvflCnt;
-	__u64 RxP3HdrEgrOvflCnt;
-	__u64 RxP4HdrEgrOvflCnt;
-	__u64 RxP5HdrEgrOvflCnt;
-	__u64 RxP6HdrEgrOvflCnt;
-	__u64 RxP7HdrEgrOvflCnt;
-	__u64 RxP8HdrEgrOvflCnt;
-	__u64 Reserved6;
-	__u64 Reserved7;
-	__u64 IBStatusChangeCnt;
-	__u64 IBLinkErrRecoveryCnt;
-	__u64 IBLinkDownedCnt;
-	__u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
-	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
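/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the deleted driver.  It shows, with a hypothetical three-field layout in
 * place of the real chip register struct, how the offsetof()-based macros
 * above turn a field name into a 64-bit register index.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_regs {			/* stand-in for the chip layout */
	unsigned long long Revision;	/* index 0 */
	unsigned long long Control;	/* index 1 */
	unsigned long long PageAlign;	/* index 2 */
};

/* Same idea as IPATH_KREG_OFFSET: byte offset divided by register width. */
#define DEMO_REG_INDEX(field) \
	(offsetof(struct demo_regs, field) / sizeof(unsigned long long))

int main(void)
{
	/* Prints "0 1 2": each 8-byte field maps to one register slot. */
	printf("%zu %zu %zu\n", DEMO_REG_INDEX(Revision),
	       DEMO_REG_INDEX(Control), DEMO_REG_INDEX(PageAlign));
	return 0;
}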
-
-static const struct ipath_kregs ipath_ht_kregs = {
-	.kr_control = IPATH_KREG_OFFSET(Control),
-	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-	.kr_debugport = IPATH_KREG_OFFSET(DebugPort),
-	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
-	.kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
-	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
-	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
-	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-	.kr_revision = IPATH_KREG_OFFSET(Revision),
-	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
-	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
-	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
-	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
-	.kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
-	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-	.kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
-	.kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
-	.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
-	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-	/*
-	 * These should not be used directly via ipath_write_kreg64(),
-	 * use them with ipath_write_kreg64_port(),
-	 */
-	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
-};
-
-static const struct ipath_cregs ipath_ht_cregs = {
-	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-	/* calc from Reg_CounterRegBase + offset */
-	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
-#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR   0x0000000000800000ULL
-#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR   0x0000000001000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR   0x0000000002000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR   0x0000000004000000ULL
-#define INFINIPATH_HWE_HTCMISCERR4          0x0000000008000000ULL
-#define INFINIPATH_HWE_HTCMISCERR5          0x0000000010000000ULL
-#define INFINIPATH_HWE_HTCMISCERR6          0x0000000020000000ULL
-#define INFINIPATH_HWE_HTCMISCERR7          0x0000000040000000ULL
-#define INFINIPATH_HWE_HTCBUSTREQPARITYERR  0x0000000080000000ULL
-#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
-#define INFINIPATH_HWE_HTCBUSIREQPARITYERR  0x0000000200000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_FBSLIP        0x0200000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_RFSLIP        0x0400000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_FBSLIP        0x0800000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_RFSLIP        0x1000000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED      0x2000000000000000ULL
-
-#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6110_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_CORRECT     0x0000000000008000
-
-
-/* TID entries (memory), HT-only */
-#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL	/* 40 bits valid */
-#define INFINIPATH_RT_VALID 0x8000000000000000ULL
-#define INFINIPATH_RT_ADDR_SHIFT 0
-#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
-#define INFINIPATH_RT_BUFSIZE_SHIFT 48
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET          0x7ULL
-
-/*
- * masks and bits that are different in different chips, or present only
- * in one
- */
-static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
-    INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
-static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
-    INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
-
-static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA \
-	(1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL \
-	(1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-/* keep the code below somewhat more readable; not used elsewhere */
-#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |	\
-				infinipath_hwe_htclnkabyte1crcerr)
-#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr |	\
-				infinipath_hwe_htclnkbbyte1crcerr)
-#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |	\
-				infinipath_hwe_htclnkbbyte0crcerr)
-#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr |	\
-				infinipath_hwe_htclnkbbyte1crcerr)
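/*
 * Editor's note: illustrative sketch only, not part of the deleted driver.
 * It shows how the _IPATH_HTLINK*_CRCBITS and _IPATH_HTLANE*_CRCBITS
 * groupings above classify a CRC error by link (A/B) and by byte lane (0/1).
 * The bit values are copied from the INFINIPATH_HWE_HTCLNK* definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define LNKA_BYTE0 0x0000000000800000ULL
#define LNKA_BYTE1 0x0000000001000000ULL
#define LNKB_BYTE0 0x0000000002000000ULL
#define LNKB_BYTE1 0x0000000004000000ULL

int main(void)
{
	uint64_t crcbits = LNKA_BYTE1;	/* pretend byte lane 1 of link A failed */

	/* Same grouping as the link/lane macros above. */
	int on_link_a = !!(crcbits & (LNKA_BYTE0 | LNKA_BYTE1));
	int on_link_b = !!(crcbits & (LNKB_BYTE0 | LNKB_BYTE1));
	int on_lane0  = !!(crcbits & (LNKA_BYTE0 | LNKB_BYTE0));
	int on_lane1  = !!(crcbits & (LNKA_BYTE1 | LNKB_BYTE1));

	printf("link A=%d link B=%d lane0=%d lane1=%d\n",
	       on_link_a, on_link_b, on_lane0, on_lane1);
	return 0;
}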
-
-static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
-			  char *msg, size_t msgl)
-{
-	char bitsmsg[64];
-	ipath_err_t crcbits = hwerrs &
-		(_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
-	/* don't check if 8bit HT */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-		crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
-	/* don't check if 8bit HT */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-		crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
-	/*
-	 * We'll want to ignore link errors on a link that is
-	 * not in use, if any.  For now, complain about both.
-	 */
-	if (crcbits) {
-		u16 ctrl0, ctrl1;
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[HT%s lane %s CRC (%llx); powercycle to completely clear]",
-			 !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
-			 "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
-				    ? "1 (B)" : "0+1 (A+B)"),
-			 !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
-			 : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
-			    "0+1"), (unsigned long long) crcbits);
-		strlcat(msg, bitsmsg, msgl);
-
-		/*
-		 * print extra info for debugging.  slave/primary
-		 * config word 4, 8 (link control 0, 1)
-		 */
-
-		if (pci_read_config_word(dd->pcidev,
-					 dd->ipath_ht_slave_off + 0x4,
-					 &ctrl0))
-			dev_info(&dd->pcidev->dev, "Couldn't read "
-				 "linkctrl0 of slave/primary "
-				 "config block\n");
-		else if (!(ctrl0 & 1 << 6))
-			/* not if EOC bit set */
-			ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
-				  ((ctrl0 >> 8) & 7) ? " CRC" : "",
-				  ((ctrl0 >> 4) & 1) ? "linkfail" :
-				  "");
-		if (pci_read_config_word(dd->pcidev,
-					 dd->ipath_ht_slave_off + 0x8,
-					 &ctrl1))
-			dev_info(&dd->pcidev->dev, "Couldn't read "
-				 "linkctrl1 of slave/primary "
-				 "config block\n");
-		else if (!(ctrl1 & 1 << 6))
-			/* not if EOC bit set */
-			ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
-				  ((ctrl1 >> 8) & 7) ? " CRC" : "",
-				  ((ctrl1 >> 4) & 1) ? "linkfail" :
-				  "");
-
-		/* disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~crcbits;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-		ipath_dbg("HT crc errs: %s\n", msg);
-	} else
-		ipath_dbg("ignoring HT crc errors 0x%llx, "
-			  "not in use\n", (unsigned long long)
-			  (hwerrs & (_IPATH_HTLINK0_CRCBITS |
-				     _IPATH_HTLINK1_CRCBITS)));
-}
-
-/* 6110 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
-	INFINIPATH_HWE_MSG(HTCBUSIREQPARITYERR, "HTC Ireq Parity"),
-	INFINIPATH_HWE_MSG(HTCBUSTREQPARITYERR, "HTC Treq Parity"),
-	INFINIPATH_HWE_MSG(HTCBUSTRESPPARITYERR, "HTC Tresp Parity"),
-	INFINIPATH_HWE_MSG(HTCMISCERR5, "HT core Misc5"),
-	INFINIPATH_HWE_MSG(HTCMISCERR6, "HT core Misc6"),
-	INFINIPATH_HWE_MSG(HTCMISCERR7, "HT core Misc7"),
-	INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
-	INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
-		        INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
-		        << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
-			  << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_ht_txe_recover(struct ipath_devdata *dd)
-{
-	++ipath_stats.sps_txeparity;
-	dev_info(&dd->pcidev->dev,
-		"Recovering from TXE PIO parity error\n");
-}
-
-
-/**
- * ipath_ht_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll
- * print them and continue.  We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-				     size_t msgl)
-{
-	ipath_err_t hwerrs;
-	u32 bits, ctrl;
-	int isfatal = 0;
-	char bitsmsg[64];
-	int log_idx;
-
-	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-
-	if (!hwerrs) {
-		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
-		/*
-		 * Better than printing confusing messages.
-		 * This seems to be related to clearing the crc error, or
-		 * the pll error during init.
-		 */
-		goto bail;
-	} else if (hwerrs == -1LL) {
-		ipath_dev_err(dd, "Read of hardware error status failed "
-			      "(all bits set); ignoring\n");
-		goto bail;
-	}
-	ipath_stats.sps_hwerrs++;
-
-	/* Always clear the error status register, except MEMBISTFAIL,
-	 * regardless of whether we continue or stop using the chip.
-	 * We want that set so we know it failed, even across driver reload.
-	 * We'll still ignore it in the hwerrmask.  We do this partly for
-	 * diagnostics, but also for support */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
-	hwerrs &= dd->ipath_hwerrmask;
-
-	/* We log some errors to EEPROM, check if we have any of those. */
-	for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
-		if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
-			ipath_inc_eeprom_err(dd, log_idx, 1);
-
-	/*
-	 * Make sure we get this much out, unless told to be quiet,
-	 * it's a parity error we may recover from,
-	 * or it occurred within the last 5 seconds.
-	 */
-	if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
-		RXE_EAGER_PARITY)) ||
-		(ipath_debug & __IPATH_VERBDBG))
-		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
-	dd->ipath_lasthwerror |= hwerrs;
-
-	if (hwerrs & ~dd->ipath_hwe_bitsextant)
-		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
-			      "%llx set\n", (unsigned long long)
-			      (hwerrs & ~dd->ipath_hwe_bitsextant));
-
-	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-	if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
-		/*
-		 * Parity errors in send memory are recoverable,
-		 * just cancel the send (if indicated in sendbuffererror),
-		 * count the occurrence, unfreeze (if no other handled
-		 * hardware error bits are set), and continue. They can
-		 * occur if a processor speculative read is done to the PIO
-		 * buffer while we are sending a packet, for example.
-		 */
-		if (hwerrs & TXE_PIO_PARITY) {
-			ipath_ht_txe_recover(dd);
-			hwerrs &= ~TXE_PIO_PARITY;
-		}
-
-		if (!hwerrs) {
-			ipath_dbg("Clearing freezemode on ignored or "
-				  "recovered hardware error\n");
-			ipath_clear_freeze(dd);
-		}
-	}
-
-	*msg = '\0';
-
-	/*
-	 * We may someday want to decode which bits belong to which
-	 * functional area for parity errors, etc.
-	 */
-	if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
-		      << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-
-	ipath_format_hwerrors(hwerrs,
-			      ipath_6110_hwerror_msgs,
-			      ARRAY_SIZE(ipath_6110_hwerror_msgs),
-			      msg, msgl);
-
-	if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
-		hwerr_crcbits(dd, hwerrs, msg, msgl);
-
-	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
-		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
-			msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
-			 INFINIPATH_HWE_COREPLL_RFSLIP |	\
-			 INFINIPATH_HWE_HTBPLL_FBSLIP |		\
-			 INFINIPATH_HWE_HTBPLL_RFSLIP |		\
-			 INFINIPATH_HWE_HTAPLL_FBSLIP |		\
-			 INFINIPATH_HWE_HTAPLL_RFSLIP)
-
-	if (hwerrs & _IPATH_PLL_FAIL) {
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PLL failed (%llx), InfiniPath hardware unusable]",
-			 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
-		strlcat(msg, bitsmsg, msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
-		/*
-		 * If it occurs, it is left masked since the external
-		 * interface is unused.
-		 */
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs) {
-		/*
-		 * If any bits are set that we aren't ignoring, only
-		 * make the complaint once, in case it's stuck
-		 * or recurring, and we get here multiple
-		 * times.
-		 * Force the link down, so the switch knows, and
-		 * the LEDs are turned off.
-		 */
-		if (dd->ipath_flags & IPATH_INITTED) {
-			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-			ipath_setup_ht_setextled(dd,
-				INFINIPATH_IBCS_L_STATE_DOWN,
-				INFINIPATH_IBCS_LT_STATE_DISABLED);
-			ipath_dev_err(dd, "Fatal Hardware Error (freeze "
-					  "mode), no longer usable, SN %.16s\n",
-					  dd->ipath_serial);
-			isfatal = 1;
-		}
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-		/* mark as having had error */
-		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-		/*
-		 * mark as not usable, at a minimum until driver
-		 * is reloaded, probably until reboot, since no
-		 * other reset is possible.
-		 */
-		dd->ipath_flags &= ~IPATH_INITTED;
-	} else {
-		*msg = 0; /* recovered from all of them */
-	}
-	if (*msg)
-		ipath_dev_err(dd, "%s hardware error\n", msg);
-	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
-		/*
-		 * for status file; if no trailing brace is copied,
-		 * we'll know it was truncated.
-		 */
-		snprintf(dd->ipath_freezemsg,
-			 dd->ipath_freezelen, "{%s}", msg);
-
-bail:;
-}
-
-/**
- * ipath_ht_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * fill in the board name, based on the board revision register
- */
-static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen)
-{
-	char *n = NULL;
-	u8 boardrev = dd->ipath_boardrev;
-	int ret = 0;
-
-	switch (boardrev) {
-	case 5:
-		/*
-		 * Original production board; two production levels, with
-		 * different serial number ranges.  See ipath_ht_early_init() for
-		 * the case where we enable IPATH_GPIO_INTR for the later serial # range.
-		 * The original 112* serial number range is no longer supported.
-		 */
-		n = "InfiniPath_QHT7040";
-		break;
-	case 7:
-		/* small form factor production board */
-		n = "InfiniPath_QHT7140";
-		break;
-	default:		/* don't know, just print the number */
-		ipath_dev_err(dd, "Don't yet know about board "
-			      "with ID %u\n", boardrev);
-		snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
-			 boardrev);
-		break;
-	}
-	if (n)
-		snprintf(name, namelen, "%s", n);
-
-	if (ret) {
-		ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name);
-		goto bail;
-	}
-	if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
-		dd->ipath_minrev > 4)) {
-		/*
-		 * This version of the driver only supports Rev 3.2 - 3.4
-		 */
-		ipath_dev_err(dd,
-			      "Unsupported InfiniPath hardware revision %u.%u!\n",
-			      dd->ipath_majrev, dd->ipath_minrev);
-		ret = 1;
-		goto bail;
-	}
-	/*
-	 * pkt/word counters are 32 bit, and therefore wrap fast enough
-	 * that we snapshot them from a timer, and maintain 64 bit shadow
-	 * copies
-	 */
-	dd->ipath_flags |= IPATH_32BITCOUNTERS;
-	dd->ipath_flags |= IPATH_GPIO_INTR;
-	if (dd->ipath_lbus_speed != 800)
-		ipath_dev_err(dd,
-			      "Incorrectly configured for HT @ %uMHz\n",
-			      dd->ipath_lbus_speed);
-
-	/*
-	 * set here, not in ipath_init_*_funcs because we have to do
-	 * it after we can read chip registers.
-	 */
-	dd->ipath_ureg_align =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
-bail:
-	return ret;
-}
-
-static void ipath_check_htlink(struct ipath_devdata *dd)
-{
-	u8 linkerr, link_off, i;
-
-	for (i = 0; i < 2; i++) {
-		link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
-		if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
-			dev_info(&dd->pcidev->dev, "Couldn't read "
-				 "linkerror%d of HT slave/primary block\n",
-				 i);
-		else if (linkerr & 0xf0) {
-			ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-				   "clearing\n", linkerr >> 4, i);
-			/*
-			 * writing the linkerr bits that are set should
-			 * clear them
-			 */
-			if (pci_write_config_byte(dd->pcidev, link_off,
-						  linkerr))
-				ipath_dbg("Failed write to clear HT "
-					  "linkerror%d\n", i);
-			if (pci_read_config_byte(dd->pcidev, link_off,
-						 &linkerr))
-				dev_info(&dd->pcidev->dev,
-					 "Couldn't reread linkerror%d of "
-					 "HT slave/primary block\n", i);
-			else if (linkerr & 0xf0)
-				dev_info(&dd->pcidev->dev,
-					 "HT linkerror%d bits 0x%x "
-					 "couldn't be cleared\n",
-					 i, linkerr >> 4);
-		}
-	}
-}
-
-static int ipath_setup_ht_reset(struct ipath_devdata *dd)
-{
-	ipath_dbg("No reset possible for this InfiniPath hardware\n");
-	return 0;
-}
-
-#define HT_INTR_DISC_CONFIG  0x80	/* HT interrupt and discovery cap */
-#define HT_INTR_REG_INDEX    2	/* intconfig requires indirect accesses */
-
-/*
- * Bits 13-15 of command==0 indicate the slave/primary block.  Clear any
- * HT CRC errors.  We only bother to do this at load time, because it's OK
- * if it happened before we were loaded (first time after boot/reset),
- * but any time after that, it's fatal anyway.  We also need to skip the
- * check for upper byte errors if we are in 8 bit mode, so figure out
- * our width.  For now, at least, also complain if it's 8 bit.
- */
-static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
-			     int pos, u8 cap_type)
-{
-	u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
-	u16 linkctrl = 0;
-	int i;
-
-	dd->ipath_ht_slave_off = pos;
-	/* command word, master_host bit */
-	/* master host || slave */
-	if ((cap_type >> 2) & 1)
-		link_a_b_off = 4;
-	else
-		link_a_b_off = 0;
-	ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
-		   link_a_b_off ? 1 : 0,
-		   link_a_b_off ? 'B' : 'A');
-
-	link_a_b_off += pos;
-
-	/*
-	 * check both link control registers; clear both HT CRC sets if
-	 * necessary.
-	 */
-	for (i = 0; i < 2; i++) {
-		link_off = pos + i * 4 + 0x4;
-		if (pci_read_config_word(pdev, link_off, &linkctrl))
-			ipath_dev_err(dd, "Couldn't read HT link control%d "
-				      "register\n", i);
-		else if (linkctrl & (0xf << 8)) {
-			ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
-				   "bits %x\n", i, linkctrl & (0xf << 8));
-			/*
-			 * now write them back to clear the error.
-			 */
-			pci_write_config_word(pdev, link_off,
-					      linkctrl & (0xf << 8));
-		}
-	}
-
-	/*
-	 * As with HT CRC bits, same for protocol errors that might occur
-	 * during boot.
-	 */
-	for (i = 0; i < 2; i++) {
-		link_off = pos + i * 4 + 0xd;
-		if (pci_read_config_byte(pdev, link_off, &linkerr))
-			dev_info(&pdev->dev, "Couldn't read linkerror%d "
-				 "of HT slave/primary block\n", i);
-		else if (linkerr & 0xf0) {
-			ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-				   "clearing\n", linkerr >> 4, i);
-			/*
-			 * writing the linkerr bits that are set will clear
-			 * them
-			 */
-			if (pci_write_config_byte
-			    (pdev, link_off, linkerr))
-				ipath_dbg("Failed write to clear HT "
-					  "linkerror%d\n", i);
-			if (pci_read_config_byte(pdev, link_off, &linkerr))
-				dev_info(&pdev->dev, "Couldn't reread "
-					 "linkerror%d of HT slave/primary "
-					 "block\n", i);
-			else if (linkerr & 0xf0)
-				dev_info(&pdev->dev, "HT linkerror%d bits "
-					 "0x%x couldn't be cleared\n",
-					 i, linkerr >> 4);
-		}
-	}
-
-	/*
-	 * This is just for our link to the host, not for devices connected
-	 * through the tunnel.
-	 */
-
-	if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
-		ipath_dev_err(dd, "Couldn't read HT link width "
-			      "config register\n");
-	else {
-		u32 width;
-		switch (linkwidth & 7) {
-		case 5:
-			width = 4;
-			break;
-		case 4:
-			width = 2;
-			break;
-		case 3:
-			width = 32;
-			break;
-		case 1:
-			width = 16;
-			break;
-		case 0:
-		default:	/* if wrong, assume 8 bit */
-			width = 8;
-			break;
-		}
-
-		dd->ipath_lbus_width = width;
-
-		if (linkwidth != 0x11) {
-			ipath_dev_err(dd, "Not configured for 16 bit HT "
-				      "(%x)\n", linkwidth);
-			if (!(linkwidth & 0xf)) {
-				ipath_dbg("Will ignore HT lane1 errors\n");
-				dd->ipath_flags |= IPATH_8BIT_IN_HT0;
-			}
-		}
-	}
-
-	/*
-	 * This is just for our link to the host, not for devices connected
-	 * through the tunnel.
-	 */
-	if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
-		ipath_dev_err(dd, "Couldn't read HT link frequency "
-			      "config register\n");
-	else {
-		u32 speed;
-		switch (linkwidth & 0xf) {
-		case 6:
-			speed = 1000;
-			break;
-		case 5:
-			speed = 800;
-			break;
-		case 4:
-			speed = 600;
-			break;
-		case 3:
-			speed = 500;
-			break;
-		case 2:
-			speed = 400;
-			break;
-		case 1:
-			speed = 300;
-			break;
-		default:
-			/*
-			 * assume reserved and vendor-specific are 200...
-			 */
-		case 0:
-			speed = 200;
-			break;
-		}
-		dd->ipath_lbus_speed = speed;
-	}
-
-	snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
-		"HyperTransport,%uMHz,x%u\n",
-		dd->ipath_lbus_speed,
-		dd->ipath_lbus_width);
-}
-
-static int ipath_ht_intconfig(struct ipath_devdata *dd)
-{
-	int ret;
-
-	if (dd->ipath_intconfig) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
-				 dd->ipath_intconfig);	/* interrupt address */
-		ret = 0;
-	} else {
-		ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
-			      "interrupt address\n");
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
-				struct ht_irq_msg *msg)
-{
-	struct ipath_devdata *dd = pci_get_drvdata(dev);
-	u64 prev_intconfig = dd->ipath_intconfig;
-
-	dd->ipath_intconfig = msg->address_lo;
-	dd->ipath_intconfig |= ((u64) msg->address_hi) << 32;
-
-	/*
-	 * If the previous value of dd->ipath_intconfig is zero, we're
-	 * getting configured for the first time, and must not program the
-	 * intconfig register here (it will be programmed later, when the
-	 * hardware is ready).  Otherwise, we should.
-	 */
-	if (prev_intconfig)
-		ipath_ht_intconfig(dd);
-}
-
-/**
- * ipath_setup_ht_config - setup the interruptconfig register
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * setup the interruptconfig register from the HT config info.
- * Also clear CRC errors in HT linkcontrol, if necessary.
- * This is done only for the real hardware.  It is done before the
- * chip address space is initialized, so it can't touch infinipath registers.
- */
-static int ipath_setup_ht_config(struct ipath_devdata *dd,
-				 struct pci_dev *pdev)
-{
-	int pos, ret;
-
-	ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
-	if (ret < 0) {
-		ipath_dev_err(dd, "Couldn't create interrupt handler: "
-			      "err %d\n", ret);
-		goto bail;
-	}
-	dd->ipath_irq = ret;
-	ret = 0;
-
-	/*
-	 * Handle clearing CRC errors in linkctrl register if necessary.  We
-	 * do this early, before we ever enable errors or hardware errors,
-	 * mostly to avoid causing the chip to enter freeze mode.
-	 */
-	pos = pci_find_capability(pdev, PCI_CAP_ID_HT);
-	if (!pos) {
-		ipath_dev_err(dd, "Couldn't find HyperTransport "
-			      "capability; no interrupts\n");
-		ret = -ENODEV;
-		goto bail;
-	}
-	do {
-		u8 cap_type;
-
-		/*
-		 * The HT capability type byte is 3 bytes after the
-		 * capability byte.
-		 */
-		if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
-			dev_info(&pdev->dev, "Couldn't read config "
-				 "command @ %d\n", pos);
-			continue;
-		}
-		if (!(cap_type & 0xE0))
-			slave_or_pri_blk(dd, pdev, pos, cap_type);
-	} while ((pos = pci_find_next_capability(pdev, pos,
-						 PCI_CAP_ID_HT)));
-
-	dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * Called during driver unload.
- * This is currently a nop for the HT chip, though not for all chips.
- */
-static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
-{
-}
-
-/**
- * ipath_setup_ht_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * Set the state of the two external LEDs, to indicate physical and
- * logical state of the IB link.  For this chip (at least with recommended
- * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
- * (logical state)
- *
- * Note:  We try to match the Mellanox HCA LED behavior as best
- * we can.  Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate.  That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
-				     u64 lst, u64 ltst)
-{
-	u64 extctl;
-	unsigned long flags = 0;
-
-	/* the diags use the LED to indicate diag info, so we leave
-	 * the external LED alone when the diags are running */
-	if (ipath_diag_inuse)
-		return;
-
-	/* Allow override of LED display for, e.g., locating the system in a rack */
-	if (dd->ipath_led_override) {
-		ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
-			? INFINIPATH_IBCS_LT_STATE_LINKUP
-			: INFINIPATH_IBCS_LT_STATE_DISABLED;
-		lst = (dd->ipath_led_override & IPATH_LED_LOG)
-			? INFINIPATH_IBCS_L_STATE_ACTIVE
-			: INFINIPATH_IBCS_L_STATE_DOWN;
-	}
-
-	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-	/*
-	 * start by setting both LED control bits to off, then turn
-	 * on the appropriate bit(s).
-	 */
-	if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
-		/*
-		 * The major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
-		 * is inverted, because it is normally used to indicate
-		 * a hardware fault at reset, if there were errors.
-		 */
-		extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
-			| INFINIPATH_EXTC_LEDGBLERR_OFF;
-		if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-			extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
-		if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-			extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
-	} else {
-		extctl = dd->ipath_extctrl &
-			~(INFINIPATH_EXTC_LED1PRIPORT_ON |
-			  INFINIPATH_EXTC_LED2PRIPORT_ON);
-		if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-			extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
-		if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-			extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
-	}
-	dd->ipath_extctrl = extctl;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
-	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-static void ipath_init_ht_variables(struct ipath_devdata *dd)
-{
-	/*
-	 * setup the register offsets, since they are different for each
-	 * chip
-	 */
-	dd->ipath_kregs = &ipath_ht_kregs;
-	dd->ipath_cregs = &ipath_ht_cregs;
-
-	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
-	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
-	/*
-	 * Fill in data for field-values that change in newer chips.
-	 * We dynamically specify only the mask for LINKTRAININGSTATE
-	 * and only the shift for LINKSTATE, as they are the only ones
-	 * that change.  Also precalculate the 3 link states of interest
-	 * and the combined mask.
-	 */
-	dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
-	dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
-	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
-		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
-	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
-	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
-	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
-	/*
-	 * Fill in data for ibcc field-values that change in newer chips.
-	 * We dynamically specify only the mask for LINKINITCMD
-	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
-	 * the only ones that change.
-	 */
-	dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
-	dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
-	dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
-	/* Fill in shifts for RcvCtrl. */
-	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
-	dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
-	dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
-	dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
-
-	dd->ipath_i_bitsextant =
-		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
-		(INFINIPATH_I_RCVAVAIL_MASK <<
-		 INFINIPATH_I_RCVAVAIL_SHIFT) |
-		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
-		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-
-	dd->ipath_e_bitsextant =
-		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
-		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
-		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
-		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
-		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
-		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
-		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
-		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
-		INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
-		INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
-		INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
-		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
-		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
-		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
-		INFINIPATH_E_HARDWARE;
-
-	dd->ipath_hwe_bitsextant =
-		(INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
-		INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
-		INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
-		INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
-		INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
-		INFINIPATH_HWE_HTCMISCERR4 |
-		INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
-		INFINIPATH_HWE_HTCMISCERR7 |
-		INFINIPATH_HWE_HTCBUSTREQPARITYERR |
-		INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
-		INFINIPATH_HWE_HTCBUSIREQPARITYERR |
-		INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
-		INFINIPATH_HWE_MEMBISTFAILED |
-		INFINIPATH_HWE_COREPLL_FBSLIP |
-		INFINIPATH_HWE_COREPLL_RFSLIP |
-		INFINIPATH_HWE_HTBPLL_FBSLIP |
-		INFINIPATH_HWE_HTBPLL_RFSLIP |
-		INFINIPATH_HWE_HTAPLL_FBSLIP |
-		INFINIPATH_HWE_HTAPLL_RFSLIP |
-		INFINIPATH_HWE_SERDESPLLFAILED |
-		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
-		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-
-	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
-	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
-	/*
-	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
-	 * 2 is Some Misc, 3 is reserved for future.
-	 */
-	dd->ipath_eep_st_masks[0].hwerrs_to_log =
-		INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-		INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
-	dd->ipath_eep_st_masks[1].hwerrs_to_log =
-		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
-	dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
-	dd->delay_mult = 2; /* SDR, 4X, can't change */
-
-	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
-	dd->ipath_link_speed_supported = IPATH_IB_SDR;
-	dd->ipath_link_width_enabled = IB_WIDTH_4X;
-	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
-	/* these can't change for this chip, so set once */
-	dd->ipath_link_width_active = dd->ipath_link_width_enabled;
-	dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
-}
-
-/**
- * ipath_ht_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask.
- */
-static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
-{
-	ipath_err_t val;
-	u64 extsval;
-
-	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
-	if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
-		ipath_dev_err(dd, "MemBIST did not complete!\n");
-	if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT)
-		ipath_dbg("MemBIST corrected\n");
-
-	ipath_check_htlink(dd);
-
-	/* barring bugs, all hwerrors become interrupts, which can */
-	val = -1LL;
-	/* don't look at crc lane1 if 8 bit */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-		val &= ~infinipath_hwe_htclnkabyte1crcerr;
-	/* don't look at crc lane1 if 8 bit */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-		val &= ~infinipath_hwe_htclnkbbyte1crcerr;
-
-	/*
-	 * Disable RXDSYNCMEMPARITY because the external serdes is unused,
-	 * and therefore the logic will never be used or initialized,
-	 * and an uninitialized state will normally result in this error
-	 * being asserted.  Similarly for the external serdes pll
-	 * lock signal.
-	 */
-	val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
-		 INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
-
-	/*
-	 * Disable MISCERR4 because of an inversion in the HT core
-	 * logic checking for errors that cause this bit to be set.
-	 * The erratum can also cause the protocol error bit to be set
-	 * in the HT config space linkerror register(s).
-	 */
-	val &= ~INFINIPATH_HWE_HTCMISCERR4;
-
-	/*
-	 * The PLL is ignored because the unused MDIO interface has a logic problem.
-	 */
-	if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
-		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-	dd->ipath_hwerrmask = val;
-}
-
-/**
- * ipath_ht_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
-{
-	u64 val, config1;
-	int ret = 0, change = 0;
-
-	ipath_dbg("Trying to bringup serdes\n");
-
-	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
-	    INFINIPATH_HWE_SERDESPLLFAILED)
-	{
-		ipath_dbg("At start, serdes PLL failed bit set in "
-			  "hwerrstatus, clearing and continuing\n");
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-				 INFINIPATH_HWE_SERDESPLLFAILED);
-	}
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
-	ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
-		   "config1=%llx, sstatus=%llx xgxs %llx\n",
-		   (unsigned long long) val, (unsigned long long) config1,
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	/* force reset on */
-	val |= INFINIPATH_SERDC0_RESET_PLL
-		/* | INFINIPATH_SERDC0_RESET_MASK */
-		;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	udelay(15);		/* need pll reset set at least for a bit */
-
-	if (val & INFINIPATH_SERDC0_RESET_PLL) {
-		u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
-		/* set lane resets, and tx idle, during pll reset */
-		val2 |= INFINIPATH_SERDC0_RESET_MASK |
-			INFINIPATH_SERDC0_TXIDLE;
-		ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
-			   "%llx)\n", (unsigned long long) val2);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-				 val2);
-		/*
-		 * be sure chip saw it
-		 */
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		/*
-		 * need pll reset clear at least 11 usec before lane
-		 * resets cleared; give it a few more
-		 */
-		udelay(15);
-		val = val2;	/* for check below */
-	}
-
-	if (val & (INFINIPATH_SERDC0_RESET_PLL |
-		   INFINIPATH_SERDC0_RESET_MASK |
-		   INFINIPATH_SERDC0_TXIDLE)) {
-		val &= ~(INFINIPATH_SERDC0_RESET_PLL |
-			 INFINIPATH_SERDC0_RESET_MASK |
-			 INFINIPATH_SERDC0_TXIDLE);
-		/* clear them */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-				 val);
-	}
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	if (val & INFINIPATH_XGXS_RESET) {
-		/* normally true after boot */
-		val &= ~INFINIPATH_XGXS_RESET;
-		change = 1;
-	}
-	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
-	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
-		/* need to compensate for Tx inversion in partner */
-		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-		         INFINIPATH_XGXS_RX_POL_SHIFT);
-		val |= dd->ipath_rx_pol_inv <<
-			INFINIPATH_XGXS_RX_POL_SHIFT;
-		change = 1;
-	}
-	if (change)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	/* clear current and de-emphasis bits */
-	config1 &= ~0x0ffffffff00ULL;
-	/* set current to 20ma */
-	config1 |= 0x00000000000ULL;
-	/* set de-emphasis to -5.68dB */
-	config1 |= 0x0cccc000000ULL;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
-	ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
-		   "config1=%llx, sstatus=%llx xgxs %llx\n",
-		   (unsigned long long) val, (unsigned long long) config1,
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	return ret;		/* for now, say we always succeeded */
-}
-
-/**
- * ipath_ht_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- *
- * Called when the driver is being unloaded.
- */
-static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
-{
-	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	val |= INFINIPATH_SERDC0_TXIDLE;
-	ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
-		  (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-/**
- * ipath_ht_put_tid - write a TID to the chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of the in-memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking, etc.
- * It's used both for the full cleanup on exit and for the normal
- * setup and teardown.
- */
-static void ipath_ht_put_tid(struct ipath_devdata *dd,
-			     u64 __iomem *tidptr, u32 type,
-			     unsigned long pa)
-{
-	if (!dd->ipath_kregbase)
-		return;
-
-	if (pa != dd->ipath_tidinvalid) {
-		if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
-			dev_info(&dd->pcidev->dev,
-				 "physaddr %lx has more than "
-				 "40 bits, using only 40!!!\n", pa);
-			pa &= INFINIPATH_RT_ADDR_MASK;
-		}
-		if (type == RCVHQ_RCV_TYPE_EAGER)
-			pa |= dd->ipath_tidtemplate;
-		else {
-			/* in words (fixed, full page).  */
-			u64 lenvalid = PAGE_SIZE >> 2;
-			lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-			pa |= lenvalid | INFINIPATH_RT_VALID;
-		}
-	}
-
-	writeq(pa, tidptr);
-}
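/*
 * Editor's note: illustrative sketch only, not part of the deleted driver.
 * It shows the expected-TID word layout that ipath_ht_put_tid() writes:
 * a 40-bit physical address, the buffer length in words at bit 48, and a
 * valid bit at bit 63 (per the INFINIPATH_RT_* definitions earlier in this
 * file).  The 4096-byte page size used here is an assumption for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define RT_ADDR_MASK     0xFFFFFFFFFFULL		/* 40 address bits */
#define RT_BUFSIZE_SHIFT 48				/* length (in words) */
#define RT_VALID         0x8000000000000000ULL

static uint64_t pack_expected_tid(uint64_t pa, unsigned int page_size)
{
	uint64_t lenvalid = (uint64_t)(page_size >> 2);	/* words, not bytes */

	pa &= RT_ADDR_MASK;		/* only 40 bits of address are usable */
	return pa | (lenvalid << RT_BUFSIZE_SHIFT) | RT_VALID;
}

int main(void)
{
	/* A page at physical address 0x12345000 -> 0x8400000012345000 */
	printf("TID word = 0x%016llx\n",
	       (unsigned long long)pack_expected_tid(0x12345000ULL, 4096));
	return 0;
}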
-
-
-/**
- * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * Used from ipath_close(), and at chip initialization.
- */
-static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
-	u64 __iomem *tidbase;
-	int i;
-
-	if (!dd->ipath_kregbase)
-		return;
-
-	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
-	/*
-	 * We need to invalidate all of the expected TID entries for this
-	 * port, so we don't have valid entries that might somehow get
-	 * used (early in the next use of this port, or through some bug).
-	 */
-	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-				   dd->ipath_rcvtidbase +
-				   port * dd->ipath_rcvtidcnt *
-				   sizeof(*tidbase));
-	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-		ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
-				 dd->ipath_tidinvalid);
-
-	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-				   dd->ipath_rcvegrbase +
-				   port * dd->ipath_rcvegrcnt *
-				   sizeof(*tidbase));
-
-	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
-		ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
-				 dd->ipath_tidinvalid);
-}
-
-/**
- * ipath_ht_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time.
- */
-static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
-{
-	dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
-	dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-	dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
-
-	/*
-	 * work around chip errata bug 7358, by marking invalid tids
-	 * as having max length
-	 */
-	dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
-		INFINIPATH_RT_BUFSIZE_SHIFT;
-}
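-
-/*
- * Illustrative example for the template above (assuming a 2KB
- * ipath_ibmaxlen): the buffer-size field would hold 2048 >> 2 = 512
- * words, shifted up by INFINIPATH_RT_BUFSIZE_SHIFT, with
- * INFINIPATH_RT_VALID set.  ipath_ht_put_tid() then ORs the buffer's
- * physical address into the low bits for eager TIDs, while expected
- * TIDs instead get a fixed PAGE_SIZE >> 2 word length.
- */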
-
-static int ipath_ht_early_init(struct ipath_devdata *dd)
-{
-	u32 __iomem *piobuf;
-	u32 pioincr, val32;
-	int i;
-
-	/*
-	 * one cache line; long IB headers will spill over into received
-	 * buffer
-	 */
-	dd->ipath_rcvhdrentsize = 16;
-	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-
-	/*
-	 * For HT, we allocate a somewhat overly large eager buffer,
-	 * such that we can guarantee that we can receive the largest
-	 * packet that we can send out.  To truly support a 4KB MTU,
-	 * we need to bump this to a large value.  To date, other than
-	 * testing, we have never encountered an HCA that can really
-	 * send 4KB MTU packets, so we do not handle that (we'll get
-	 * error interrupts if we ever see one).
-	 */
-	dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
-
-	/*
-	 * the min() check here is currently a nop, but it may not
-	 * always be, depending on just how we do ipath_rcvegrbufsize
-	 */
-	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
-				 dd->ipath_rcvegrbufsize);
-	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-	ipath_ht_tidtemplate(dd);
-
-	/*
-	 * zero all the TID entries at startup.  We do this for sanity,
-	 * in case of a previous driver crash of some kind, and also
-	 * because the chip powers up with these memories in an unknown
-	 * state.  Use portcnt, not cfgports, since this is for the
-	 * full chip, not for current (possibly different) configuration
-	 * value.
-	 * Chip Errata bug 6447
-	 */
-	for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
-		ipath_ht_clear_tids(dd, val32);
-
-	/*
-	 * write the pbc of each buffer, to be sure it's initialized, then
-	 * cancel all the buffers, and also abort any packets that might
-	 * have been in flight for some reason (the latter is for driver
-	 * unload/reload, but isn't a bad idea at first init).	PIO send
-	 * isn't enabled at this point, so there is no danger of sending
-	 * these out on the wire.
-	 * Chip Errata bug 6610
-	 */
-	piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
-				  dd->ipath_piobufbase);
-	pioincr = dd->ipath_palign / sizeof(*piobuf);
-	for (i = 0; i < dd->ipath_piobcnt2k; i++) {
-		/*
-		 * reasonable word count, just to init pbc
-		 */
-		writel(16, piobuf);
-		piobuf += pioincr;
-	}
-
-	ipath_get_eeprom_info(dd);
-	if (dd->ipath_boardrev == 5) {
-		/*
-		 * Later production QHT7040 has same changes as QHT7140, so
-		 * can use GPIO interrupts.  They have serial #'s starting
-		 * with 128, rather than 112.
-		 */
-		if (dd->ipath_serial[0] == '1' &&
-		    dd->ipath_serial[1] == '2' &&
-		    dd->ipath_serial[2] == '8')
-			dd->ipath_flags |= IPATH_GPIO_INTR;
-		else {
-			ipath_dev_err(dd, "Unsupported InfiniPath board "
-				"(serial number %.16s)!\n",
-				dd->ipath_serial);
-			return 1;
-		}
-	}
-
-	if (dd->ipath_minrev >= 4) {
-		/* Rev4+ reports extra errors via internal GPIO pins */
-		dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
-		dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-				 dd->ipath_gpio_mask);
-	}
-
-	return 0;
-}
-
-
-/**
- * ipath_ht_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port data
- * @kbase: ipath_base_info pointer
- *
- * We set the HT flag so user code can tell HyperTransport devices from
- * PCIe ones, since the lower bandwidth of PCIe vs HyperTransport can
- * affect some user packet algorithms.
- */
-static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
-	struct ipath_base_info *kinfo = kbase;
-
-	kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
-		IPATH_RUNTIME_PIO_REGSWAPPED;
-
-	if (pd->port_dd->ipath_minrev < 4)
-		kinfo->spi_runtime_flags |= IPATH_RUNTIME_RCVHDR_COPY;
-
-	return 0;
-}
-
-static void ipath_ht_free_irq(struct ipath_devdata *dd)
-{
-	free_irq(dd->ipath_irq, dd);
-	ht_destroy_irq(dd->ipath_irq);
-	dd->ipath_irq = 0;
-	dd->ipath_intconfig = 0;
-}
-
-static struct ipath_message_header *
-ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
-	return (struct ipath_message_header *)
-		&rhf_addr[sizeof(u64) / sizeof(u32)];
-}
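-
-/*
- * Note on the offset above: sizeof(u64) / sizeof(u32) == 2, so the
- * message header is taken to start one 64-bit word (presumably the
- * receive header flags) past rhf_addr.
- */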
-
-static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
-	dd->ipath_portcnt =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-	dd->ipath_p0_rcvegrcnt =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_ht_read_counters(struct ipath_devdata *dd,
-				   struct infinipath_counters *cntrs)
-{
-	cntrs->LBIntCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
-	cntrs->LBFlowStallCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
-	cntrs->TxSDmaDescCnt = 0;
-	cntrs->TxUnsupVLErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
-	cntrs->TxDataPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
-	cntrs->TxFlowPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
-	cntrs->TxDwordCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
-	cntrs->TxLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
-	cntrs->TxMaxMinLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
-	cntrs->TxUnderrunCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
-	cntrs->TxFlowStallCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
-	cntrs->TxDroppedPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
-	cntrs->RxDroppedPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
-	cntrs->RxDataPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
-	cntrs->RxFlowPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
-	cntrs->RxDwordCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
-	cntrs->RxLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
-	cntrs->RxMaxMinLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
-	cntrs->RxICRCErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
-	cntrs->RxVCRCErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
-	cntrs->RxFlowCtrlErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
-	cntrs->RxBadFormatCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
-	cntrs->RxLinkProblemCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
-	cntrs->RxEBPCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
-	cntrs->RxLPCRCErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
-	cntrs->RxBufOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
-	cntrs->RxTIDFullErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
-	cntrs->RxTIDValidErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
-	cntrs->RxPKeyMismatchCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
-	cntrs->RxP0HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
-	cntrs->RxP1HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
-	cntrs->RxP2HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
-	cntrs->RxP3HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
-	cntrs->RxP4HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
-	cntrs->RxP5HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
-	cntrs->RxP6HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
-	cntrs->RxP7HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
-	cntrs->RxP8HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
-	cntrs->RxP9HdrEgrOvflCnt = 0;
-	cntrs->RxP10HdrEgrOvflCnt = 0;
-	cntrs->RxP11HdrEgrOvflCnt = 0;
-	cntrs->RxP12HdrEgrOvflCnt = 0;
-	cntrs->RxP13HdrEgrOvflCnt = 0;
-	cntrs->RxP14HdrEgrOvflCnt = 0;
-	cntrs->RxP15HdrEgrOvflCnt = 0;
-	cntrs->RxP16HdrEgrOvflCnt = 0;
-	cntrs->IBStatusChangeCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
-	cntrs->IBLinkErrRecoveryCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
-	cntrs->IBLinkDownedCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
-	cntrs->IBSymbolErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
-	cntrs->RxVL15DroppedPktCnt = 0;
-	cntrs->RxOtherLocalPhyErrCnt = 0;
-	cntrs->PcieRetryBufDiagQwordCnt = 0;
-	cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
-	cntrs->LocalLinkIntegrityErrCnt =
-		(dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-		dd->ipath_lli_errs : dd->ipath_lli_errors;
-	cntrs->RxVlErrCnt = 0;
-	cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
-{
-	return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC).  Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining.  To do this right, we reset IBC
- * as well.
- */
-static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
-{
-	u64 val, prev_val;
-
-	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	val = prev_val | INFINIPATH_XGXS_RESET;
-	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control);
-}
-
-
-static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
-	int ret;
-
-	switch (which) {
-	case IPATH_IB_CFG_LWID:
-		ret = dd->ipath_link_width_active;
-		break;
-	case IPATH_IB_CFG_SPD:
-		ret = dd->ipath_link_speed_active;
-		break;
-	case IPATH_IB_CFG_LWID_ENB:
-		ret = dd->ipath_link_width_enabled;
-		break;
-	case IPATH_IB_CFG_SPD_ENB:
-		ret = dd->ipath_link_speed_enabled;
-		break;
-	default:
-		ret = -ENOTSUPP;
-		break;
-	}
-	return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
-	int ret = 0;
-
-	if (which == IPATH_IB_CFG_LWID_ENB)
-		dd->ipath_link_width_enabled = val;
-	else if (which == IPATH_IB_CFG_SPD_ENB)
-		dd->ipath_link_speed_enabled = val;
-	else
-		ret = -ENOTSUPP;
-	return ret;
-}
-
-
-static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
-	ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
-		ipath_ib_linktrstate(dd, ibcs));
-	return 0;
-}
-
-
-/**
- * ipath_init_iba6110_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
-{
-	dd->ipath_f_intrsetup = ipath_ht_intconfig;
-	dd->ipath_f_bus = ipath_setup_ht_config;
-	dd->ipath_f_reset = ipath_setup_ht_reset;
-	dd->ipath_f_get_boardname = ipath_ht_boardname;
-	dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
-	dd->ipath_f_early_init = ipath_ht_early_init;
-	dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
-	dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
-	dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
-	dd->ipath_f_clear_tids = ipath_ht_clear_tids;
-	dd->ipath_f_put_tid = ipath_ht_put_tid;
-	dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
-	dd->ipath_f_setextled = ipath_setup_ht_setextled;
-	dd->ipath_f_get_base_info = ipath_ht_get_base_info;
-	dd->ipath_f_free_irq = ipath_ht_free_irq;
-	dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
-	dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
-	dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
-	dd->ipath_f_config_ports = ipath_ht_config_ports;
-	dd->ipath_f_read_counters = ipath_ht_read_counters;
-	dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
-	dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
-	dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
-	dd->ipath_f_config_jint = ipath_ht_config_jint;
-	dd->ipath_f_ib_updown = ipath_ht_ib_updown;
-
-	/*
-	 * initialize chip-specific variables
-	 */
-	ipath_init_ht_variables(dd);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_init_chip.c b/drivers/staging/rdma/ipath/ipath_init_chip.c
deleted file mode 100644
index a5eea19..0000000
--- a/drivers/staging/rdma/ipath/ipath_init_chip.c
+++ /dev/null
@@ -1,1062 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-/*
- * minimum number of PIO buffers we want each user port to have, after the driver's buffers are reserved
- */
-#define IPATH_MIN_USER_PORT_BUFCNT 7
-
-/*
- * Number of ports we are configured to use (to allow for more pio
- * buffers per port, etc.)  Zero means use chip value.
- */
-static ushort ipath_cfgports;
-
-module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
-MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
-
-/*
- * Number of buffers reserved for driver (verbs and layered drivers.)
- * Initialized based on number of PIO buffers if not set via module interface.
- * The problem with this is that it's global, but we'll use different
- * numbers for different chip types.
- */
-static ushort ipath_kpiobufs;
-
-static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
-
-module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
-		  &ipath_kpiobufs, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
-
-/**
- * create_port0_egr - allocate the eager TID buffers
- * @dd: the infinipath device
- *
- * This code is now quite different for user and kernel, because
- * the kernel uses skb's, for the accelerated network performance.
- * This is the kernel (port0) version.
- *
- * Allocate the eager TID buffers and program them into infinipath.
- * We use the network layer alloc_skb() allocator to allocate the
- * memory, and either use the buffers as is for things like verbs
- * packets, or pass the buffers up to the ipath layered driver and
- * thence the network layer, replacing them as we do so (see
- * ipath_rcv_layer()).
- */
-static int create_port0_egr(struct ipath_devdata *dd)
-{
-	unsigned e, egrcnt;
-	struct ipath_skbinfo *skbinfo;
-	int ret;
-
-	egrcnt = dd->ipath_p0_rcvegrcnt;
-
-	skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
-	if (skbinfo == NULL) {
-		ipath_dev_err(dd, "allocation error for eager TID "
-			      "skb array\n");
-		ret = -ENOMEM;
-		goto bail;
-	}
-	for (e = 0; e < egrcnt; e++) {
-		/*
-		 * This is a bit tricky in that we allocate extra
-		 * space for 2 bytes of the 14 byte ethernet header.
-		 * These two bytes are passed in the ipath header so
-		 * the rest of the data is word aligned.  We allocate
-		 * 4 bytes so that the data buffer stays word aligned.
-		 * See ipath_kreceive() for more details.
-		 */
-		skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL);
-		if (!skbinfo[e].skb) {
-			ipath_dev_err(dd, "SKB allocation error for "
-				      "eager TID %u\n", e);
-			while (e != 0)
-				dev_kfree_skb(skbinfo[--e].skb);
-			vfree(skbinfo);
-			ret = -ENOMEM;
-			goto bail;
-		}
-	}
-	/*
-	 * After loop above, so we can test non-NULL to see if ready
-	 * to use at receive, etc.
-	 */
-	dd->ipath_port0_skbinfo = skbinfo;
-
-	for (e = 0; e < egrcnt; e++) {
-		dd->ipath_port0_skbinfo[e].phys =
-		  ipath_map_single(dd->pcidev,
-				   dd->ipath_port0_skbinfo[e].skb->data,
-				   dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
-		dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
-				    ((char __iomem *) dd->ipath_kregbase +
-				     dd->ipath_rcvegrbase),
-				    RCVHQ_RCV_TYPE_EAGER,
-				    dd->ipath_port0_skbinfo[e].phys);
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-static int bringup_link(struct ipath_devdata *dd)
-{
-	u64 val, ibc;
-	int ret = 0;
-
-	/* hold IBC in reset */
-	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control);
-
-	/*
-	 * set initial max size pkt IBC will send, including ICRC; it's the
-	 * PIO buffer size in dwords, less 1; also see ipath_set_mtu()
-	 */
-	val = (dd->ipath_ibmaxlen >> 2) + 1;
-	ibc = val << dd->ibcc_mpl_shift;
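-	/*
-	 * For illustration, assuming a 2KB ipath_ibmaxlen: val would be
-	 * (2048 >> 2) + 1 = 513 words, shifted into the IBC max packet
-	 * length field by ibcc_mpl_shift above.
-	 */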
-
-	/* flowcontrolwatermark is in units of KBytes */
-	ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
-	/*
-	 * How often flow control is sent, more or less in usecs; balance
-	 * against the watermark value, so that in theory senders always get
-	 * a flow control update in time to keep the IB link from going idle.
-	 */
-	ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
-	/* max error tolerance */
-	ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-	/* use "real" buffer space for */
-	ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
-	/* IB credit flow control. */
-	ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-	/* initially come up waiting for TS1, without sending anything. */
-	dd->ipath_ibcctrl = ibc;
-	/*
-	 * Want to start out with both LINKCMD and LINKINITCMD in NOP
-	 * (0 and 0).  Don't put linkinitcmd in ipath_ibcctrl, want that
-	 * to stay a NOP. Flag that we are disabled, for the (unlikely)
-	 * case that some recovery path is trying to bring the link up
-	 * before we are ready.
-	 */
-	ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-		INFINIPATH_IBCC_LINKINITCMD_SHIFT;
-	dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
-	ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
-		   (unsigned long long) ibc);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
-
-	/* be sure chip saw it */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-	ret = dd->ipath_f_bringup_serdes(dd);
-
-	if (ret)
-		dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
-			 "not usable\n");
-	else {
-		/* enable IBC */
-		dd->ipath_control |= INFINIPATH_C_LINKENABLE;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-				 dd->ipath_control);
-	}
-
-	return ret;
-}
-
-static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
-{
-	struct ipath_portdata *pd;
-
-	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (pd) {
-		pd->port_dd = dd;
-		pd->port_cnt = 1;
-		/* The port 0 pkey table is used by the layer interface. */
-		pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
-		pd->port_seq_cnt = 1;
-	}
-	return pd;
-}
-
-static int init_chip_first(struct ipath_devdata *dd)
-{
-	struct ipath_portdata *pd;
-	int ret = 0;
-	u64 val;
-
-	spin_lock_init(&dd->ipath_kernel_tid_lock);
-	spin_lock_init(&dd->ipath_user_tid_lock);
-	spin_lock_init(&dd->ipath_sendctrl_lock);
-	spin_lock_init(&dd->ipath_uctxt_lock);
-	spin_lock_init(&dd->ipath_sdma_lock);
-	spin_lock_init(&dd->ipath_gpio_lock);
-	spin_lock_init(&dd->ipath_eep_st_lock);
-	spin_lock_init(&dd->ipath_sdepb_lock);
-	mutex_init(&dd->ipath_eep_lock);
-
-	/*
-	 * skip cfgports stuff because we are not allocating memory,
-	 * and we don't want problems if the portcnt changed due to
-	 * cfgports.  We do still check and report a difference if they
-	 * are not the same (which should be impossible).
-	 */
-	dd->ipath_f_config_ports(dd, ipath_cfgports);
-	if (!ipath_cfgports)
-		dd->ipath_cfgports = dd->ipath_portcnt;
-	else if (ipath_cfgports <= dd->ipath_portcnt) {
-		dd->ipath_cfgports = ipath_cfgports;
-		ipath_dbg("Configured to use %u ports out of %u in chip\n",
-			  dd->ipath_cfgports, ipath_read_kreg32(dd,
-			  dd->ipath_kregs->kr_portcnt));
-	} else {
-		dd->ipath_cfgports = dd->ipath_portcnt;
-		ipath_dbg("Tried to configure to use %u ports; chip "
-			  "only supports %u\n", ipath_cfgports,
-			  ipath_read_kreg32(dd,
-				  dd->ipath_kregs->kr_portcnt));
-	}
-	/*
-	 * Allocate full portcnt array, rather than just cfgports, because
-	 * cleanup iterates across all possible ports.
-	 */
-	dd->ipath_pd = kcalloc(dd->ipath_portcnt, sizeof(*dd->ipath_pd),
-			       GFP_KERNEL);
-
-	if (!dd->ipath_pd) {
-		ipath_dev_err(dd, "Unable to allocate portdata array, "
-			      "failing\n");
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	pd = create_portdata0(dd);
-	if (!pd) {
-		ipath_dev_err(dd, "Unable to allocate portdata for port "
-			      "0, failing\n");
-		ret = -ENOMEM;
-		goto done;
-	}
-	dd->ipath_pd[0] = pd;
-
-	dd->ipath_rcvtidcnt =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
-	dd->ipath_rcvtidbase =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
-	dd->ipath_rcvegrcnt =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-	dd->ipath_rcvegrbase =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
-	dd->ipath_palign =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-	dd->ipath_piobufbase =
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
-	dd->ipath_piosize2k = val & ~0U;
-	dd->ipath_piosize4k = val >> 32;
-	if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
-		ipath_mtu4096 = 0; /* 4KB not supported by this chip */
-	dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
-	dd->ipath_piobcnt2k = val & ~0U;
-	dd->ipath_piobcnt4k = val >> 32;
-	dd->ipath_pio2kbase =
-		(u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
-				 (dd->ipath_piobufbase & 0xffffffff));
-	if (dd->ipath_piobcnt4k) {
-		dd->ipath_pio4kbase = (u32 __iomem *)
-			(((char __iomem *) dd->ipath_kregbase) +
-			 (dd->ipath_piobufbase >> 32));
-		/*
-		 * 4K buffers take 2 pages; we use roundup just to be
-		 * paranoid; we calculate it once here, rather than on
-		 * every buf allocation
-		 */
-		dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
-					  dd->ipath_palign);
-		ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
-			  "(%x aligned)\n",
-			  dd->ipath_piobcnt2k, dd->ipath_piosize2k,
-			  dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
-			  dd->ipath_piosize4k, dd->ipath_pio4kbase,
-			  dd->ipath_4kalign);
-	} else {
-		ipath_dbg("%u 2k piobufs @ %p\n",
-			  dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
-	}
-done:
-	return ret;
-}
-
-/**
- * init_chip_reset - re-initialize after a reset, or enable
- * @dd: the infinipath device
- *
- * sanity check at least some of the values after reset, and
- * ensure no receives or transmits occur (explicitly, in case the reset
- * failed)
- */
-static int init_chip_reset(struct ipath_devdata *dd)
-{
-	u32 rtmp;
-	int i;
-	unsigned long flags;
-
-	/*
-	 * ensure chip does no sends or receives, tail updates, or
-	 * pioavail updates while we re-initialize
-	 */
-	dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
-	for (i = 0; i < dd->ipath_portcnt; i++) {
-		clear_bit(dd->ipath_r_portenable_shift + i,
-			  &dd->ipath_rcvctrl);
-		clear_bit(dd->ipath_r_intravail_shift + i,
-			  &dd->ipath_rcvctrl);
-	}
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-		dd->ipath_rcvctrl);
-
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl = 0U; /* no sdma, etc */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
-	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
-	if (rtmp != dd->ipath_rcvtidcnt)
-		dev_info(&dd->pcidev->dev, "tidcnt was %u before "
-			 "reset, now %u, using original\n",
-			 dd->ipath_rcvtidcnt, rtmp);
-	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
-	if (rtmp != dd->ipath_rcvtidbase)
-		dev_info(&dd->pcidev->dev, "tidbase was %u before "
-			 "reset, now %u, using original\n",
-			 dd->ipath_rcvtidbase, rtmp);
-	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-	if (rtmp != dd->ipath_rcvegrcnt)
-		dev_info(&dd->pcidev->dev, "egrcnt was %u before "
-			 "reset, now %u, using original\n",
-			 dd->ipath_rcvegrcnt, rtmp);
-	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
-	if (rtmp != dd->ipath_rcvegrbase)
-		dev_info(&dd->pcidev->dev, "egrbase was %u before "
-			 "reset, now %u, using original\n",
-			 dd->ipath_rcvegrbase, rtmp);
-
-	return 0;
-}
-
-static int init_pioavailregs(struct ipath_devdata *dd)
-{
-	int ret;
-
-	dd->ipath_pioavailregs_dma = dma_alloc_coherent(
-		&dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
-		GFP_KERNEL);
-	if (!dd->ipath_pioavailregs_dma) {
-		ipath_dev_err(dd, "failed to allocate PIOavail reg area "
-			      "in memory\n");
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	/*
-	 * we really want L2 cache aligned, but for current CPUs of
-	 * interest, they are the same.
-	 */
-	dd->ipath_statusp = (u64 *)
-		((char *)dd->ipath_pioavailregs_dma +
-		 ((2 * L1_CACHE_BYTES +
-		   dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
-	/* copy the current value now that it's really allocated */
-	*dd->ipath_statusp = dd->_ipath_status;
-	/*
-	 * setup buffer to hold freeze msg, accessible to apps,
-	 * following statusp
-	 */
-	dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
-	/* and its length */
-	dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
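-	/*
-	 * Rough layout of the page allocated above: the DMA'ed pioavail
-	 * shadow copies at the start, then (at the cache-line-related
-	 * offset computed for ipath_statusp) a u64 status word, followed
-	 * immediately by the freeze message buffer, which gets the rest
-	 * of that cache line.
-	 */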
-
-	ret = 0;
-
-done:
-	return ret;
-}
-
-/**
- * init_shadow_tids - allocate the shadow TID array
- * @dd: the infinipath device
- *
- * allocate the shadow TID array, so we can ipath_munlock previous
- * entries.  It may make more sense to move the pageshadow to the
- * port data structure, so we only allocate memory for ports actually
- * in use, since we are now at 8k per port.
- */
-static void init_shadow_tids(struct ipath_devdata *dd)
-{
-	struct page **pages;
-	dma_addr_t *addrs;
-
-	pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-			sizeof(struct page *));
-	if (!pages) {
-		ipath_dev_err(dd, "failed to allocate shadow page * "
-			      "array, no expected sends!\n");
-		dd->ipath_pageshadow = NULL;
-		return;
-	}
-
-	addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-			sizeof(dma_addr_t));
-	if (!addrs) {
-		ipath_dev_err(dd, "failed to allocate shadow dma handle "
-			      "array, no expected sends!\n");
-		vfree(pages);
-		dd->ipath_pageshadow = NULL;
-		return;
-	}
-
-	dd->ipath_pageshadow = pages;
-	dd->ipath_physshadow = addrs;
-}
-
-static void enable_chip(struct ipath_devdata *dd, int reinit)
-{
-	u32 val;
-	u64 rcvmask;
-	unsigned long flags;
-	int i;
-
-	if (!reinit)
-		init_waitqueue_head(&ipath_state_wait);
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-			 dd->ipath_rcvctrl);
-
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	/* Enable PIO send, and update of PIOavail regs to memory. */
-	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
-		INFINIPATH_S_PIOBUFAVAILUPD;
-
-	/*
-	 * Set the PIO avail update threshold to host memory
-	 * on chips that support it.
-	 */
-	if (dd->ipath_pioupd_thresh)
-		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
-			<< INFINIPATH_S_UPDTHRESH_SHIFT;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-	/*
-	 * Enable kernel ports' receive and receive interrupt.
-	 * Other ports done as user opens and inits them.
-	 */
-	rcvmask = 1ULL;
-	dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
-		(rcvmask << dd->ipath_r_intravail_shift);
-	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
-		dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-			 dd->ipath_rcvctrl);
-
-	/*
-	 * now ready for use.  this should be cleared whenever we
-	 * detect a reset, or initiate one.
-	 */
-	dd->ipath_flags |= IPATH_INITTED;
-
-	/*
-	 * Init our shadow copies of head from tail values,
-	 * and write head values to match.
-	 */
-	val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
-	ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
-
-	/* Initialize so we interrupt on next packet received */
-	ipath_write_ureg(dd, ur_rcvhdrhead,
-			 dd->ipath_rhdrhead_intr_off |
-			 dd->ipath_pd[0]->port_head, 0);
-
-	/*
-	 * by now pioavail updates to memory should have occurred, so
-	 * copy them into our working/shadow registers; this is in
-	 * case something went wrong with abort, but mostly to get the
-	 * initial values of the generation bit correct.
-	 */
-	for (i = 0; i < dd->ipath_pioavregs; i++) {
-		__le64 pioavail;
-
-		/*
-		 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
-		 */
-		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-			pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
-		else
-			pioavail = dd->ipath_pioavailregs_dma[i];
-		/*
-		 * don't need to worry about ipath_pioavailkernel here
-		 * because we will call ipath_chg_pioavailkernel() later
-		 * in initialization, to busy out buffers as needed
-		 */
-		dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
-	}
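-	/*
-	 * With the errata-6641 swap above, e.g. shadow index 4 is filled
-	 * from pioavailregs_dma[5] and index 5 from pioavailregs_dma[4];
-	 * indices 0-3 are never swapped.
-	 */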
-	/* can get counters, stats, etc. */
-	dd->ipath_flags |= IPATH_PRESENT;
-}
-
-static int init_housekeeping(struct ipath_devdata *dd, int reinit)
-{
-	char boardn[40];
-	int ret = 0;
-
-	/*
-	 * have to clear shadow copies of registers at init that are
-	 * not otherwise set here, or all kinds of bizarre things
-	 * happen with driver on chip reset
-	 */
-	dd->ipath_rcvhdrsize = 0;
-
-	/*
-	 * Don't clear ipath_flags as 8bit mode was set before
-	 * entering this func. However, we do set the linkstate to
-	 * unknown, so we can watch for a transition.
-	 * PRESENT is set because we want register reads to work,
-	 * and the kernel infrastructure saw it in config space;
-	 * we clear it if we have failures.
-	 */
-	dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
-	dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
-			     IPATH_LINKDOWN | IPATH_LINKINIT);
-
-	ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
-	dd->ipath_revision =
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
-
-	/*
-	 * set up fundamental info we need to use the chip; we assume
-	 * if the revision reg and these regs are OK, we don't need to
-	 * special case the rest
-	 */
-	dd->ipath_sregbase =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
-	dd->ipath_cregbase =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
-	dd->ipath_uregbase =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
-	ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
-		   "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
-		   dd->ipath_uregbase, dd->ipath_cregbase);
-	if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
-	    || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
-	    || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
-	    || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
-		ipath_dev_err(dd, "Register read failures from chip, "
-			      "giving up initialization\n");
-		dd->ipath_flags &= ~IPATH_PRESENT;
-		ret = -ENODEV;
-		goto done;
-	}
-
-
-	/* clear diagctrl register, in case diags were running and crashed */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, 0);
-
-	/* clear the initial reset flag, in case first driver load */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-			 INFINIPATH_E_RESET);
-
-	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
-		   (unsigned long long) dd->ipath_revision,
-		   dd->ipath_pcirev);
-
-	if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
-	     INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
-		ipath_dev_err(dd, "Driver only handles version %d, "
-			      "chip swversion is %d (%llx), failing\n",
-			      IPATH_CHIP_SWVERSION,
-			      (int)(dd->ipath_revision >>
-				    INFINIPATH_R_SOFTWARE_SHIFT) &
-			      INFINIPATH_R_SOFTWARE_MASK,
-			      (unsigned long long) dd->ipath_revision);
-		ret = -ENOSYS;
-		goto done;
-	}
-	dd->ipath_majrev = (u8) ((dd->ipath_revision >>
-				  INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
-				 INFINIPATH_R_CHIPREVMAJOR_MASK);
-	dd->ipath_minrev = (u8) ((dd->ipath_revision >>
-				  INFINIPATH_R_CHIPREVMINOR_SHIFT) &
-				 INFINIPATH_R_CHIPREVMINOR_MASK);
-	dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
-				    INFINIPATH_R_BOARDID_SHIFT) &
-				   INFINIPATH_R_BOARDID_MASK);
-
-	ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
-
-	snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
-		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
-		 "SW Compat %u\n",
-		 IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
-		 (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
-		 INFINIPATH_R_ARCH_MASK,
-		 dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
-		 (unsigned)(dd->ipath_revision >>
-			    INFINIPATH_R_SOFTWARE_SHIFT) &
-		 INFINIPATH_R_SOFTWARE_MASK);
-
-	ipath_dbg("%s", dd->ipath_boardversion);
-
-	if (ret)
-		goto done;
-
-	if (reinit)
-		ret = init_chip_reset(dd);
-	else
-		ret = init_chip_first(dd);
-
-done:
-	return ret;
-}
-
-static void verify_interrupt(unsigned long opaque)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-
-	if (!dd)
-		return; /* being torn down */
-
-	/*
-	 * If we don't have any interrupts, let the user know and
-	 * don't bother checking again.
-	 */
-	if (dd->ipath_int_counter == 0) {
-		if (!dd->ipath_f_intr_fallback(dd))
-			dev_err(&dd->pcidev->dev, "No interrupts detected, "
-				"not usable.\n");
-		else /* re-arm the timer to see if fallback works */
-			mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
-	} else
-		ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
-			dd->ipath_int_counter);
-}
-
-/**
- * ipath_init_chip - do the actual initialization sequence on the chip
- * @dd: the infinipath device
- * @reinit: reinitializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip.  This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0).  We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers,
- * TIDs, etc. after the reset or enable has completed.
- */
-int ipath_init_chip(struct ipath_devdata *dd, int reinit)
-{
-	int ret = 0;
-	u32 kpiobufs, defkbufs;
-	u32 piobufs, uports;
-	u64 val;
-	struct ipath_portdata *pd;
-	gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-
-	ret = init_housekeeping(dd, reinit);
-	if (ret)
-		goto done;
-
-	/*
-	 * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
-	 * but then it no longer nicely fits power of two, and since
-	 * we now use routines that backend onto __get_free_pages, the
-	 * rest would be wasted.
-	 */
-	dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
-			 dd->ipath_rcvhdrcnt);
-
-	/*
-	 * Set up the shadow copies of the piobufavail registers,
-	 * which we compare against the chip registers for now, and
-	 * the in memory DMA'ed copies of the registers.  This has to
-	 * be done early, before we calculate lastport, etc.
-	 */
-	piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-	/*
-	 * calc number of pioavail registers, and save it; we have 2
-	 * bits per buffer.
-	 */
-	dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
-		/ (sizeof(u64) * BITS_PER_BYTE / 2);
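-	/*
-	 * Worked example: with 2 bits per buffer, each u64 covers 32
-	 * buffers, so e.g. 144 buffers round up to ALIGN(144, 32) = 160,
-	 * giving 160 / 32 = 5 pioavail registers.
-	 */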
-	uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
-	if (piobufs > 144)
-		defkbufs = 32 + dd->ipath_pioreserved;
-	else
-		defkbufs = 16 + dd->ipath_pioreserved;
-
-	if (ipath_kpiobufs && (ipath_kpiobufs +
-		(uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
-		int i = (int) piobufs -
-			(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
-		if (i < 1)
-			i = 1;
-		dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
-			 "%d for kernel leaves too few for %d user ports "
-			 "(%d each); using %u\n", ipath_kpiobufs,
-			 piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
-		/*
-		 * shouldn't change ipath_kpiobufs, because could be
-		 * different for different devices...
-		 */
-		kpiobufs = i;
-	} else if (ipath_kpiobufs)
-		kpiobufs = ipath_kpiobufs;
-	else
-		kpiobufs = defkbufs;
-	dd->ipath_lastport_piobuf = piobufs - kpiobufs;
-	dd->ipath_pbufsport =
-		uports ? dd->ipath_lastport_piobuf / uports : 0;
-	/* if not an even divisor, some user ports get extra buffers */
-	dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
-		(dd->ipath_pbufsport * uports);
-	if (dd->ipath_ports_extrabuf)
-		ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
-			"ports <= %u\n", dd->ipath_pbufsport,
-			dd->ipath_ports_extrabuf);
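-	/*
-	 * Hypothetical example of the split above: 128 user buffers across
-	 * 5 user ports gives pbufsport = 25 with 3 left over, so the
-	 * lowest-numbered 3 user ports each get one extra buffer.
-	 */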
-	dd->ipath_lastpioindex = 0;
-	dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
-	/* ipath_pioavailshadow initialized earlier */
-	ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
-		   "each for %u user ports\n", kpiobufs,
-		   piobufs, dd->ipath_pbufsport, uports);
-	ret = dd->ipath_f_early_init(dd);
-	if (ret) {
-		ipath_dev_err(dd, "Early initialization failure\n");
-		goto done;
-	}
-
-	/*
-	 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
-	 * done after early_init.
-	 */
-	dd->ipath_hdrqlast =
-		dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
-			 dd->ipath_rcvhdrentsize);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
-			 dd->ipath_rcvhdrsize);
-
-	if (!reinit) {
-		ret = init_pioavailregs(dd);
-		init_shadow_tids(dd);
-		if (ret)
-			goto done;
-	}
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
-			 dd->ipath_pioavailregs_phys);
-
-	/*
-	 * this is to detect s/w errors, which the h/w works around by
-	 * ignoring the low 6 bits of address, if it wasn't aligned.
-	 */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
-	if (val != dd->ipath_pioavailregs_phys) {
-		ipath_dev_err(dd, "Catastrophic software error, "
-			      "SendPIOAvailAddr written as %lx, "
-			      "read back as %llx\n",
-			      (unsigned long) dd->ipath_pioavailregs_phys,
-			      (unsigned long long) val);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
-
-	/*
-	 * make sure we are not in freeze, and PIO send enabled, so
-	 * writes to pbc happen
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
-	/*
-	 * bring up the link before the error clears below, since we expect
-	 * serdes pll errors during this, the first time after reset
-	 */
-	if (bringup_link(dd)) {
-		dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
-		ret = -ENETDOWN;
-		goto done;
-	}
-
-	/*
-	 * clear any "expected" hwerrs from reset and/or initialization;
-	 * clear any that aren't enabled (at least this once), and then
-	 * set the enable mask
-	 */
-	dd->ipath_f_init_hwerrors(dd);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-			 dd->ipath_hwerrmask);
-
-	/* clear all */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
-	/* enable errors that are masked, at least this first time. */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-			 ~dd->ipath_maskederrs);
-	dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
-	dd->ipath_errormask =
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
-	/* clear any interrupts up to this point (ints still not enabled) */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
-	dd->ipath_f_tidtemplate(dd);
-
-	/*
-	 * Set up the port 0 (kernel) rcvhdr q and egr TIDs.  If doing
-	 * re-init, the simplest way to handle this is to free
-	 * existing, and re-allocate.
-	 * Need to re-create rest of port 0 portdata as well.
-	 */
-	pd = dd->ipath_pd[0];
-	if (reinit) {
-		struct ipath_portdata *npd;
-
-		/*
-		 * Alloc and init new ipath_portdata for port0,
-		 * Then free old pd. Could lead to fragmentation, but also
-		 * makes later support for hot-swap easier.
-		 */
-		npd = create_portdata0(dd);
-		if (npd) {
-			ipath_free_pddata(dd, pd);
-			dd->ipath_pd[0] = npd;
-			pd = npd;
-		} else {
-			ipath_dev_err(dd, "Unable to allocate portdata"
-				      " for port 0, failing\n");
-			ret = -ENOMEM;
-			goto done;
-		}
-	}
-	ret = ipath_create_rcvhdrq(dd, pd);
-	if (!ret)
-		ret = create_port0_egr(dd);
-	if (ret) {
-		ipath_dev_err(dd, "failed to allocate kernel port's "
-			      "rcvhdrq and/or egr bufs\n");
-		goto done;
-	} else {
-		enable_chip(dd, reinit);
-	}
-
-	/* after enable_chip, so pioavailshadow setup */
-	ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
-
-	/*
-	 * Cancel any possible active sends from early driver load.
-	 * Follows early_init because some chips have to initialize
-	 * PIO buffers in early_init to avoid false parity errors.
-	 * After enable and ipath_chg_pioavailkernel so we can safely
-	 * enable pioavail updates and PIOENABLE; packets are now
-	 * ready to go out.
-	 */
-	ipath_cancel_sends(dd, 1);
-
-	if (!reinit) {
-		/*
-		 * Used when we close a port, for DMA already in flight
-		 * at close.
-		 */
-		dd->ipath_dummy_hdrq = dma_alloc_coherent(
-			&dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
-			&dd->ipath_dummy_hdrq_phys,
-			gfp_flags);
-		if (!dd->ipath_dummy_hdrq) {
-			dev_info(&dd->pcidev->dev,
-				"Couldn't allocate 0x%lx bytes for dummy hdrq\n",
-				dd->ipath_pd[0]->port_rcvhdrq_size);
-			/* fallback to just 0'ing */
-			dd->ipath_dummy_hdrq_phys = 0UL;
-		}
-	}
-
-	/*
-	 * cause retrigger of pending interrupts ignored during init,
-	 * even if we had errors
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-
-	if (!dd->ipath_stats_timer_active) {
-		/*
-		 * first init, or after an admin disable/enable:
-		 * set up the stats retrieval timer, even if we had errors
-		 * in the last portion of setup
-		 */
-		setup_timer(&dd->ipath_stats_timer, ipath_get_faststats,
-				(unsigned long)dd);
-		/* every 5 seconds; */
-		dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
-		/* takes ~16 seconds to overflow at full IB 4x bandwidth */
-		add_timer(&dd->ipath_stats_timer);
-		dd->ipath_stats_timer_active = 1;
-	}
-
-	/* Set up SendDMA if chip supports it */
-	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-		ret = setup_sdma(dd);
-
-	/* Set up HoL state */
-	setup_timer(&dd->ipath_hol_timer, ipath_hol_event, (unsigned long)dd);
-
-	dd->ipath_hol_state = IPATH_HOL_UP;
-
-done:
-	if (!ret) {
-		*dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
-		if (!dd->ipath_f_intrsetup(dd)) {
-			/* now we can enable all interrupts from the chip */
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
-					 -1LL);
-			/* force re-interrupt of any pending interrupts. */
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
-					 0ULL);
-			/* chip is usable; mark it as initialized */
-			*dd->ipath_statusp |= IPATH_STATUS_INITTED;
-
-			/*
-			 * setup to verify we get an interrupt, and fallback
-			 * to an alternate if necessary and possible
-			 */
-			if (!reinit) {
-				setup_timer(&dd->ipath_intrchk_timer,
-						verify_interrupt,
-						(unsigned long)dd);
-			}
-			dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
-			add_timer(&dd->ipath_intrchk_timer);
-		} else
-			ipath_dev_err(dd, "No interrupts enabled, couldn't "
-				      "setup interrupt address\n");
-
-		if (dd->ipath_cfgports > ipath_stats.sps_nports)
-			/*
-			 * sps_nports is a global, so, we set it to
-			 * the highest number of ports of any of the
-			 * chips we find; we never decrement it, at
-			 * least for now.  Since this might have changed
-			 * over disable/enable or prior to reset, always
-			 * do the check and potentially adjust.
-			 */
-			ipath_stats.sps_nports = dd->ipath_cfgports;
-	} else
-		ipath_dbg("Failed (%d) to initialize chip\n", ret);
-
-	/* if ret is non-zero, we probably should do some cleanup
-	   here... */
-	return ret;
-}
-
-static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
-{
-	struct ipath_devdata *dd;
-	unsigned long flags;
-	unsigned short val;
-	int ret;
-
-	ret = ipath_parse_ushort(str, &val);
-
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	if (ret < 0)
-		goto bail;
-
-	if (val == 0) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-		if (dd->ipath_kregbase)
-			continue;
-		if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
-			   (dd->ipath_cfgports *
-			    IPATH_MIN_USER_PORT_BUFCNT)))
-		{
-			ipath_dev_err(
-				dd,
-				"Allocating %d PIO bufs for kernel leaves "
-				"too few for %d user ports (%d each)\n",
-				val, dd->ipath_cfgports - 1,
-				IPATH_MIN_USER_PORT_BUFCNT);
-			ret = -EINVAL;
-			goto bail;
-		}
-		dd->ipath_lastport_piobuf =
-			dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
-	}
-
-	ipath_kpiobufs = val;
-	ret = 0;
-bail:
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_intr.c b/drivers/staging/rdma/ipath/ipath_intr.c
deleted file mode 100644
index 0403fa2..0000000
--- a/drivers/staging/rdma/ipath/ipath_intr.c
+++ /dev/null
@@ -1,1271 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- */
-void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
-{
-	u32 piobcnt;
-	unsigned long sbuf[4];
-	/*
-	 * it's possible that sendbuffererror could have bits set; might
-	 * have already done this as a result of hardware error handling
-	 */
-	piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-	/* read these before writing errorclear */
-	sbuf[0] = ipath_read_kreg64(
-		dd, dd->ipath_kregs->kr_sendbuffererror);
-	sbuf[1] = ipath_read_kreg64(
-		dd, dd->ipath_kregs->kr_sendbuffererror + 1);
-	if (piobcnt > 128)
-		sbuf[2] = ipath_read_kreg64(
-			dd, dd->ipath_kregs->kr_sendbuffererror + 2);
-	if (piobcnt > 192)
-		sbuf[3] = ipath_read_kreg64(
-			dd, dd->ipath_kregs->kr_sendbuffererror + 3);
-	else
-		sbuf[3] = 0;
-
-	if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
-		int i;
-		if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
-			time_after(dd->ipath_lastcancel, jiffies)) {
-			__IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
-					  "SendbufErrs %lx %lx", sbuf[0],
-					  sbuf[1]);
-			if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
-				printk(" %lx %lx ", sbuf[2], sbuf[3]);
-			printk("\n");
-		}
-
-		for (i = 0; i < piobcnt; i++)
-			if (test_bit(i, sbuf))
-				ipath_disarm_piobufs(dd, i, 1);
-		/* ignore armlaunch errs for a bit */
-		dd->ipath_lastcancel = jiffies+3;
-	}
-}
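-
-/*
- * Note on sbuf[] above: each unsigned long holds one disarm bit per PIO
- * buffer, so the four words cover up to 256 buffers on 64-bit systems;
- * the third word is only read when piobcnt exceeds 128 and the fourth
- * only when it exceeds 192.
- */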
-
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
-	(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
-	 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
-	 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
-	 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
-	 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
-	 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
-	(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
-	 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
-	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
-	 INFINIPATH_E_INVALIDADDR)
-
-/*
- * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors or
- * errors not related to freeze and cancelling buffers.  Armlaunch errors
- * can't be ignored because more could arrive while we are still cleaning
- * up, and those need to be cancelled as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
-	 (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
-	 INFINIPATH_E_SPKTLEN)
-
-/*
- * these are errors that can occur when the link changes state while
- * a packet is being sent or received.  This doesn't cover things
- * like EBP or VCRC, which can result from the sender's link changing
- * state, so we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
-	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
-	 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
-	 INFINIPATH_E_RUNEXPCHAR)
-
-static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
-{
-	u64 ignore_this_time = 0;
-
-	ipath_disarm_senderrbufs(dd);
-	if ((errs & E_SUM_LINK_PKTERRS) &&
-	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-		/*
-		 * This can happen when SMA is trying to bring the link
-		 * up, but the IB link changes state at the "wrong" time.
-		 * The IB logic then complains that the packet isn't
-		 * valid.  We don't want to confuse people, so we just
-		 * don't print them, except at debug
-		 */
-		ipath_dbg("Ignoring packet errors %llx, because link not "
-			  "ACTIVE\n", (unsigned long long) errs);
-		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
-	}
-
-	return ignore_this_time;
-}
-
-/* generic hw error messages... */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
-	{ \
-		.mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a <<    \
-			  INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ),   \
-		.msg = "TXE " #a " Memory Parity"	     \
-	}
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
-	{ \
-		.mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a <<    \
-			  INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ),   \
-		.msg = "RXE " #a " Memory Parity"	     \
-	}
-
-static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
-	INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
-	INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),
-
-	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
-	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
-	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),
-
-	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
-	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
-	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
-	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
-	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
-	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
-	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
-};
-
-/**
- * ipath_format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
-	strlcat(msg, "[", msgl);
-	strlcat(msg, hwmsg, msgl);
-	strlcat(msg, "]", msgl);
-}
-
-/**
- * ipath_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void ipath_format_hwerrors(u64 hwerrs,
-			   const struct ipath_hwerror_msgs *hwerrmsgs,
-			   size_t nhwerrmsgs,
-			   char *msg, size_t msgl)
-{
-	int i;
-	const int glen =
-	    ARRAY_SIZE(ipath_generic_hwerror_msgs);
-
-	for (i = 0; i < glen; i++) {
-		if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
-			ipath_format_hwmsg(msg, msgl,
-					   ipath_generic_hwerror_msgs[i].msg);
-		}
-	}
-
-	for (i = 0; i < nhwerrmsgs; i++) {
-		if (hwerrs & hwerrmsgs[i].mask) {
-			ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-		}
-	}
-}
-
-/* return the strings for the most common link states */
-static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
-	char *ret;
-	u32 state;
-
-	state = ipath_ib_state(dd, ibcs);
-	if (state == dd->ib_init)
-		ret = "Init";
-	else if (state == dd->ib_arm)
-		ret = "Arm";
-	else if (state == dd->ib_active)
-		ret = "Active";
-	else
-		ret = "Down";
-	return ret;
-}
-
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
-{
-	struct ib_event event;
-
-	event.device = &dd->verbs_dev->ibdev;
-	event.element.port_num = 1;
-	event.event = ev;
-	ib_dispatch_event(&event);
-}
-
-static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
-				     ipath_err_t errs)
-{
-	u32 ltstate, lstate, ibstate, lastlstate;
-	u32 init = dd->ib_init;
-	u32 arm = dd->ib_arm;
-	u32 active = dd->ib_active;
-	const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-
-	lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
-	ibstate = ipath_ib_state(dd, ibcs);
-	/* linkstate at last interrupt */
-	lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
-	ltstate = ipath_ib_linktrstate(dd, ibcs); /* link training state */
-
-	/*
-	 * Since going into a recovery state causes the link state to go
-	 * down and since recovery is transitory, it is better if we "miss"
-	 * ever seeing the link training state go into recovery (i.e.,
-	 * ignore this transition for link state special handling purposes)
-	 * without even updating ipath_lastibcstat.
-	 */
-	if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
-	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
-	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
-		goto done;
-
-	/*
-	 * if linkstate transitions into INIT from any of the various down
-	 * states, or if it transitions from any of the up (INIT or better)
-	 * states into any of the down states (except link recovery), then
-	 * call the chip-specific code to take appropriate actions.
-	 */
-	if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
-		lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
-		/* transitioned to UP */
-		if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
-			/* link came up, so we must no longer be disabled */
-			dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
-			ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
-			goto skip_ibchange; /* chip-code handled */
-		}
-	} else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
-		(dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
-		ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
-		ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
-		int handled;
-		handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
-		dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
-		if (handled) {
-			ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
-			goto skip_ibchange; /* chip-code handled */
-		}
-	}
-
-	/*
-	 * Significant enough to always print and get into logs, if it was
-	 * unexpected.  If it was a requested state change, we'll have
-	 * already cleared the flags, so we won't print this warning
-	 */
-	if ((ibstate != arm && ibstate != active) &&
-	    (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
-		dev_info(&dd->pcidev->dev, "Link state changed from %s "
-			 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
-			 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
-	}
-
-	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
-	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-		u32 lastlts;
-		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
-		/*
-		 * Ignore cycling back and forth from Polling.Active to
-		 * Polling.Quiet while waiting for the other end of the link
-		 * to come up, except to try and decide if we are connected
-		 * to a live IB device or not.  We will cycle back and
-		 * forth between them if no cable is plugged in, the other
-		 * device is powered off or disabled, etc.
-		 */
-		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
-		    lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-			if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
-			     (++dd->ipath_ibpollcnt == 40)) {
-				dd->ipath_flags |= IPATH_NOCABLE;
-				*dd->ipath_statusp |=
-					IPATH_STATUS_IB_NOCABLE;
-				ipath_cdbg(LINKVERB, "Set NOCABLE\n");
-			}
-			ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
-				ipath_ibcstatus_str[ltstate], ibstate);
-			goto skip_ibchange;
-		}
-	}
-
-	dd->ipath_ibpollcnt = 0; /* no longer in a Poll* state */
-	ipath_stats.sps_iblink++;
-
-	if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
-		u64 linkrecov;
-		linkrecov = ipath_snap_cntr(dd,
-			dd->ipath_cregs->cr_iblinkerrrecovcnt);
-		if (linkrecov != dd->ipath_lastlinkrecov) {
-			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
-				(unsigned long long) ibcs,
-				ib_linkstate(dd, ibcs),
-				ipath_ibcstatus_str[ltstate],
-				(unsigned long long) linkrecov);
-			/* and no more until active again */
-			dd->ipath_lastlinkrecov = 0;
-			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-			goto skip_ibchange;
-		}
-	}
-
-	if (ibstate == init || ibstate == arm || ibstate == active) {
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
-		if (ibstate == init || ibstate == arm) {
-			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-			if (dd->ipath_flags & IPATH_LINKACTIVE)
-				signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		}
-		if (ibstate == arm) {
-			dd->ipath_flags |= IPATH_LINKARMED;
-			dd->ipath_flags &= ~(IPATH_LINKUNK |
-				IPATH_LINKINIT | IPATH_LINKDOWN |
-				IPATH_LINKACTIVE | IPATH_NOCABLE);
-			ipath_hol_down(dd);
-		} else if (ibstate == init) {
-			/*
-			 * set INIT and DOWN.  Down is checked by
-			 * most of the other code, but INIT is
-			 * useful to know in a few places.
-			 */
-			dd->ipath_flags |= IPATH_LINKINIT |
-				IPATH_LINKDOWN;
-			dd->ipath_flags &= ~(IPATH_LINKUNK |
-				IPATH_LINKARMED | IPATH_LINKACTIVE |
-				IPATH_NOCABLE);
-			ipath_hol_down(dd);
-		} else {  /* active */
-			dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
-				dd->ipath_cregs->cr_iblinkerrrecovcnt);
-			*dd->ipath_statusp |=
-				IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
-			dd->ipath_flags |= IPATH_LINKACTIVE;
-			dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-				| IPATH_LINKDOWN | IPATH_LINKARMED |
-				IPATH_NOCABLE);
-			if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-				ipath_restart_sdma(dd);
-			signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
-			/* LED active not handled in chip _f_updown */
-			dd->ipath_f_setextled(dd, lstate, ltstate);
-			ipath_hol_up(dd);
-		}
-
-		/*
-		 * print after we've already done the work, so as not to
-		 * delay the state changes and notifications, for debugging
-		 */
-		if (lstate == lastlstate)
-			ipath_cdbg(LINKVERB, "Unchanged from last: %s "
-				"(%x)\n", ib_linkstate(dd, ibcs), ibstate);
-		else
-			ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
-				  dd->ipath_unit, ib_linkstate(dd, ibcs),
-				  ipath_ibcstatus_str[ltstate],  ibstate);
-	} else { /* down */
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		dd->ipath_flags |= IPATH_LINKDOWN;
-		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-				     | IPATH_LINKACTIVE |
-				     IPATH_LINKARMED);
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-		dd->ipath_lli_counter = 0;
-
-		if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
-			ipath_cdbg(VERBOSE, "Unit %u link state down "
-				   "(state 0x%x), from %s\n",
-				   dd->ipath_unit, lstate,
-				   ib_linkstate(dd, dd->ipath_lastibcstat));
-		else
-			ipath_cdbg(LINKVERB, "Unit %u link state changed "
-				   "to %s (0x%x) from down (%x)\n",
-				   dd->ipath_unit,
-				   ipath_ibcstatus_str[ltstate],
-				   ibstate, lastlstate);
-	}
-
-skip_ibchange:
-	dd->ipath_lastibcstat = ibcs;
-done:
-	return;
-}
-
-static void handle_supp_msgs(struct ipath_devdata *dd,
-			     unsigned supp_msgs, char *msg, u32 msgsz)
-{
-	/*
-	 * Print the message unless it's ibc status change only, which
-	 * happens so often we never want to count it.
-	 */
-	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
-		int iserr;
-		ipath_err_t mask;
-		iserr = ipath_decode_err(dd, msg, msgsz,
-					 dd->ipath_lasterror &
-					 ~INFINIPATH_E_IBSTATUSCHANGED);
-
-		mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-			INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
-
-		/* if we're in debug, then don't mask SDMADISABLED msgs */
-		if (ipath_debug & __IPATH_DBG)
-			mask &= ~INFINIPATH_E_SDMADISABLED;
-
-		if (dd->ipath_lasterror & ~mask)
-			ipath_dev_err(dd, "Suppressed %u messages for "
-				      "fast-repeating errors (%s) (%llx)\n",
-				      supp_msgs, msg,
-				      (unsigned long long)
-				      dd->ipath_lasterror);
-		else {
-			/*
-			 * rcvegrfull and rcvhdrqfull are "normal", for some
-			 * types of processes (mostly benchmarks) that send
-			 * huge numbers of messages, while not processing
-			 * them. So only complain about these at debug
-			 * level.
-			 */
-			if (iserr)
-				ipath_dbg("Suppressed %u messages for %s\n",
-					  supp_msgs, msg);
-			else
-				ipath_cdbg(ERRPKT,
-					"Suppressed %u messages for %s\n",
-					  supp_msgs, msg);
-		}
-	}
-}
-
-static unsigned handle_frequent_errors(struct ipath_devdata *dd,
-				       ipath_err_t errs, char *msg,
-				       u32 msgsz, int *noprint)
-{
-	unsigned long nc;
-	static unsigned long nextmsg_time;
-	static unsigned nmsgs, supp_msgs;
-
-	/*
-	 * Throttle back "fast" messages to no more than 10 per 5 seconds.
-	 * This isn't perfect, but it's a reasonable heuristic. If we get
-	 * more than 10, give a 6x longer delay.
-	 */
-	nc = jiffies;
-	if (nmsgs > 10) {
-		if (time_before(nc, nextmsg_time)) {
-			*noprint = 1;
-			if (!supp_msgs++)
-				nextmsg_time = nc + HZ * 3;
-		} else if (supp_msgs) {
-			handle_supp_msgs(dd, supp_msgs, msg, msgsz);
-			supp_msgs = 0;
-			nmsgs = 0;
-		}
-	} else if (!nmsgs++ || time_after(nc, nextmsg_time)) {
-		nextmsg_time = nc + HZ / 2;
-	}
-
-	return supp_msgs;
-}
-
-static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
-	unsigned long flags;
-	int expected;
-
-	if (ipath_debug & __IPATH_DBG) {
-		char msg[128];
-		ipath_decode_err(dd, msg, sizeof msg, errs &
-			INFINIPATH_E_SDMAERRS);
-		ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
-	}
-	if (ipath_debug & __IPATH_VERBDBG) {
-		unsigned long tl, hd, status, lengen;
-		tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
-		hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
-		status = ipath_read_kreg64(dd,
-			dd->ipath_kregs->kr_senddmastatus);
-		lengen = ipath_read_kreg64(dd,
-			dd->ipath_kregs->kr_senddmalengen);
-		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
-			"lengen 0x%lx\n", tl, hd, status, lengen);
-	}
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-	if (!expected)
-		ipath_cancel_sends(dd, 1);
-}
-
-static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
-{
-	unsigned long flags;
-	int expected;
-
-	if ((istat & INFINIPATH_I_SDMAINT) &&
-	    !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-		ipath_sdma_intr(dd);
-
-	if (istat & INFINIPATH_I_SDMADISABLED) {
-		expected = test_bit(IPATH_SDMA_ABORTING,
-			&dd->ipath_sdma_status);
-		ipath_dbg("%s SDmaDisabled intr\n",
-			expected ? "expected" : "unexpected");
-		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-		if (!expected)
-			ipath_cancel_sends(dd, 1);
-		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-	}
-}
-
-static int handle_hdrq_full(struct ipath_devdata *dd)
-{
-	int chkerrpkts = 0;
-	u32 hd, tl;
-	u32 i;
-
-	ipath_stats.sps_hdrqfull++;
-	for (i = 0; i < dd->ipath_cfgports; i++) {
-		struct ipath_portdata *pd = dd->ipath_pd[i];
-
-		if (i == 0) {
-			/*
-			 * For kernel receive queues, we just want to know
-			 * if there are packets in the queue that we can
-			 * process.
-			 */
-			if (pd->port_head != ipath_get_hdrqtail(pd))
-				chkerrpkts |= 1 << i;
-			continue;
-		}
-
-		/* Skip if user context is not open */
-		if (!pd || !pd->port_cnt)
-			continue;
-
-		/* Don't report the same point multiple times. */
-		if (dd->ipath_flags & IPATH_NODMA_RTAIL)
-			tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
-		else
-			tl = ipath_get_rcvhdrtail(pd);
-		if (tl == pd->port_lastrcvhdrqtail)
-			continue;
-
-		hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
-		if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
-			pd->port_lastrcvhdrqtail = tl;
-			pd->port_hdrqfull++;
-			/* flush hdrqfull so that poll() sees it */
-			wmb();
-			wake_up_interruptible(&pd->port_wait);
-		}
-	}
-
-	return chkerrpkts;
-}
-
-static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
-	char msg[128];
-	u64 ignore_this_time = 0;
-	u64 iserr = 0;
-	int chkerrpkts = 0, noprint = 0;
-	unsigned supp_msgs;
-	int log_idx;
-
-	/*
-	 * don't report errors that are masked, either at init
-	 * (not set in ipath_errormask), or temporarily (set in
-	 * ipath_maskederrs)
-	 */
-	errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
-
-	supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
-		&noprint);
-
-	/* do these first, they are most important */
-	if (errs & INFINIPATH_E_HARDWARE) {
-		/* reuse same msg buf */
-		dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
-	} else {
-		u64 mask;
-		for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
-			mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
-			if (errs & mask)
-				ipath_inc_eeprom_err(dd, log_idx, 1);
-		}
-	}
-
-	if (errs & INFINIPATH_E_SDMAERRS)
-		handle_sdma_errors(dd, errs);
-
-	if (!noprint && (errs & ~dd->ipath_e_bitsextant))
-		ipath_dev_err(dd, "error interrupt with unknown errors "
-			      "%llx set\n", (unsigned long long)
-			      (errs & ~dd->ipath_e_bitsextant));
-
-	if (errs & E_SUM_ERRS)
-		ignore_this_time = handle_e_sum_errs(dd, errs);
-	else if ((errs & E_SUM_LINK_PKTERRS) &&
-	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-		/*
-		 * This can happen when SMA is trying to bring the link
-		 * up, but the IB link changes state at the "wrong" time.
-		 * The IB logic then complains that the packet isn't
-		 * valid.  We don't want to confuse people, so we just
-		 * don't print them, except at debug
-		 */
-		ipath_dbg("Ignoring packet errors %llx, because link not "
-			  "ACTIVE\n", (unsigned long long) errs);
-		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
-	}
-
-	if (supp_msgs == 250000) {
-		int s_iserr;
-		/*
-		 * It's not entirely reasonable to assume that the errors set
-		 * in the last clear period are all responsible for the
-		 * problem, but the alternative is to assume that only the
-		 * ones on this particular interrupt are, which also isn't great
-		 */
-		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
-
-		dd->ipath_errormask &= ~dd->ipath_maskederrs;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-				 dd->ipath_errormask);
-		s_iserr = ipath_decode_err(dd, msg, sizeof msg,
-					   dd->ipath_maskederrs);
-
-		if (dd->ipath_maskederrs &
-		    ~(INFINIPATH_E_RRCVEGRFULL |
-		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
-			ipath_dev_err(dd, "Temporarily disabling "
-			    "error(s) %llx reporting; too frequent (%s)\n",
-				(unsigned long long) dd->ipath_maskederrs,
-				msg);
-		else {
-			/*
-			 * rcvegrfull and rcvhdrqfull are "normal",
-			 * for some types of processes (mostly benchmarks)
-			 * that send huge numbers of messages, while not
-			 * processing them.  So only complain about
-			 * these at debug level.
-			 */
-			if (s_iserr)
-				ipath_dbg("Temporarily disabling reporting "
-				    "too frequent queue full errors (%s)\n",
-				    msg);
-			else
-				ipath_cdbg(ERRPKT,
-				    "Temporarily disabling reporting too"
-				    " frequent packet errors (%s)\n",
-				    msg);
-		}
-
-		/*
-		 * Re-enable the masked errors after around 3 minutes, in
-		 * ipath_get_faststats().  If we have a series of fast
-		 * repeating but different errors, the interval will keep
-		 * stretching out, but that's OK, since such a situation is
-		 * pretty catastrophic anyway.
-		 */
-		dd->ipath_unmasktime = jiffies + HZ * 180;
-	}
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
-	if (ignore_this_time)
-		errs &= ~ignore_this_time;
-	if (errs & ~dd->ipath_lasterror) {
-		errs &= ~dd->ipath_lasterror;
-		/* never suppress duplicate hwerrors or ibstatuschange */
-		dd->ipath_lasterror |= errs &
-			~(INFINIPATH_E_HARDWARE |
-			  INFINIPATH_E_IBSTATUSCHANGED);
-	}
-
-	if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
-		dd->ipath_spectriggerhit++;
-		ipath_dbg("%lu special trigger hits\n",
-			dd->ipath_spectriggerhit);
-	}
-
-	/* likely due to cancel; so suppress message unless verbose */
-	if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
-		time_after(dd->ipath_lastcancel, jiffies)) {
-		/* armlaunch takes precedence; it often causes both. */
-		ipath_cdbg(VERBOSE,
-			"Suppressed %s error (%llx) after sendbuf cancel\n",
-			(errs &  INFINIPATH_E_SPIOARMLAUNCH) ?
-			"armlaunch" : "sendpktlen", (unsigned long long)errs);
-		errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
-	}
-
-	if (!errs)
-		return 0;
-
-	if (!noprint) {
-		ipath_err_t mask;
-		/*
-		 * The ones we mask off are handled specially below
-		 * or above.  Also mask SDMADISABLED by default as it
-		 * is too chatty.
-		 */
-		mask = INFINIPATH_E_IBSTATUSCHANGED |
-			INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-			INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
-
-		/* if we're in debug, then don't mask SDMADISABLED msgs */
-		if (ipath_debug & __IPATH_DBG)
-			mask &= ~INFINIPATH_E_SDMADISABLED;
-
-		ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
-	} else
-		/* so we don't need if (!noprint) at strlcat's below */
-		*msg = 0;
-
-	if (errs & E_SUM_PKTERRS) {
-		ipath_stats.sps_pkterrs++;
-		chkerrpkts = 1;
-	}
-	if (errs & E_SUM_ERRS)
-		ipath_stats.sps_errs++;
-
-	if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
-		ipath_stats.sps_crcerrs++;
-		chkerrpkts = 1;
-	}
-	iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
-
-
-	/*
-	 * We don't want to print these two as they happen, or we can make
-	 * the situation even worse, because it takes so long to print
-	 * messages to serial consoles.  Kernel ports get printed from
-	 * fast_stats, no more than every 5 seconds; user ports get printed
-	 * on close.
-	 */
-	if (errs & INFINIPATH_E_RRCVHDRFULL)
-		chkerrpkts |= handle_hdrq_full(dd);
-	if (errs & INFINIPATH_E_RRCVEGRFULL) {
-		struct ipath_portdata *pd = dd->ipath_pd[0];
-
-		/*
-		 * since this is of less importance and not likely to
-		 * happen without also getting hdrfull, only count
-		 * occurrences; don't check each port (or even the kernel
-		 * vs user)
-		 */
-		ipath_stats.sps_etidfull++;
-		if (pd->port_head != ipath_get_hdrqtail(pd))
-			chkerrpkts |= 1;
-	}
-
-	/*
-	 * do this before IBSTATUSCHANGED, in case both bits are set in a
-	 * single interrupt; we want the STATUSCHANGE to "win", so that our
-	 * internal copy of the state machine is updated correctly
-	 */
-	if (errs & INFINIPATH_E_RIBLOSTLINK) {
-		/*
-		 * force through block below
-		 */
-		errs |= INFINIPATH_E_IBSTATUSCHANGED;
-		ipath_stats.sps_iblink++;
-		dd->ipath_flags |= IPATH_LINKDOWN;
-		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-
-		ipath_dbg("Lost link, link now down (%s)\n",
-			ipath_ibcstatus_str[ipath_read_kreg64(dd,
-			dd->ipath_kregs->kr_ibcstatus) & 0xf]);
-	}
-	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
-		handle_e_ibstatuschanged(dd, errs);
-
-	if (errs & INFINIPATH_E_RESET) {
-		if (!noprint)
-			ipath_dev_err(dd, "Got reset, requires re-init "
-				      "(unload and reload driver)\n");
-		dd->ipath_flags &= ~IPATH_INITTED;	/* needs re-init */
-		/* mark as having had error */
-		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
-	}
-
-	if (!noprint && *msg) {
-		if (iserr)
-			ipath_dev_err(dd, "%s error\n", msg);
-	}
-	if (dd->ipath_state_wanted & dd->ipath_flags) {
-		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
-			   "waking\n", dd->ipath_state_wanted,
-			   dd->ipath_flags);
-		wake_up_interruptible(&ipath_state_wait);
-	}
-
-	return chkerrpkts;
-}
-
-/*
- * try to clean up as much as possible after anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- */
-void ipath_clear_freeze(struct ipath_devdata *dd)
-{
-	/* disable error interrupts, to avoid confusion */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
-
-	/* also disable interrupts; errormask is sometimes overwritten */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
-	ipath_cancel_sends(dd, 1);
-
-	/* clear the freeze, and be sure chip saw it */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-	/* force in-memory update now we are out of freeze */
-	ipath_force_pio_avail_update(dd);
-
-	/*
-	 * force new interrupt if any hwerr, error or interrupt bits are
-	 * still set, and clear "safe" send packet errors related to freeze
-	 * and cancelling sends.  Re-enable error interrupts before possible
-	 * force of re-interrupt on pending interrupts.
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-		E_SPKT_ERRS_IGNORE);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-		dd->ipath_errormask);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-}
-
-
-/* this is separate to allow for better optimization of ipath_intr() */
-
-static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
-{
-	/*
-	 * This sometimes happens during driver init and unload; we don't want
-	 * to process any interrupts at that point
-	 */
-
-	/* this is just a bandaid, not a fix, in case something goes badly
-	 * wrong */
-	if (++*unexpectp > 100) {
-		if (++*unexpectp > 105) {
-			/*
-			 * ok, we must be taking somebody else's interrupts,
-			 * due to a messed up mptable and/or PIRQ table, so
-			 * unregister the interrupt.  We've seen this during
-			 * linuxbios development work, and it may happen in
-			 * the future again.
-			 */
-			if (dd->pcidev && dd->ipath_irq) {
-				ipath_dev_err(dd, "Now %u unexpected "
-					      "interrupts, unregistering "
-					      "interrupt handler\n",
-					      *unexpectp);
-				ipath_dbg("free_irq of irq %d\n",
-					  dd->ipath_irq);
-				dd->ipath_f_free_irq(dd);
-			}
-		}
-		if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
-			ipath_dev_err(dd, "%u unexpected interrupts, "
-				      "disabling interrupts completely\n",
-				      *unexpectp);
-			/*
-			 * disable all interrupts, something is very wrong
-			 */
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
-					 0ULL);
-		}
-	} else if (*unexpectp > 1)
-		ipath_dbg("Interrupt when not ready, should not happen, "
-			  "ignoring\n");
-}
-
-static noinline void ipath_bad_regread(struct ipath_devdata *dd)
-{
-	static int allbits;
-
-	/* separate routine, for better optimization of ipath_intr() */
-
-	/*
-	 * We print the message and disable interrupts, in hope of
-	 * having a better chance of debugging the problem.
-	 */
-	ipath_dev_err(dd,
-		      "Read of interrupt status failed (all bits set)\n");
-	if (allbits++) {
-		/* disable all interrupts, something is very wrong */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-		if (allbits == 2) {
-			ipath_dev_err(dd, "Still bad interrupt status, "
-				      "unregistering interrupt\n");
-			dd->ipath_f_free_irq(dd);
-		} else if (allbits > 2) {
-			if ((allbits % 10000) == 0)
-				printk(".");
-		} else
-			ipath_dev_err(dd, "Disabling interrupts, "
-				      "multiple errors\n");
-	}
-}
-
-static void handle_layer_pioavail(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-	int ret;
-
-	ret = ipath_ib_piobufavail(dd->verbs_dev);
-	if (ret > 0)
-		goto set;
-
-	return;
-set:
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-/*
- * Handle receive interrupts for user ports; this means a user
- * process was waiting for a packet to arrive, and didn't want
- * to poll
- */
-static void handle_urcv(struct ipath_devdata *dd, u64 istat)
-{
-	u64 portr;
-	int i;
-	int rcvdint = 0;
-
-	/*
-	 * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
-	 * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
-	 * would both like timely updates of the bits so that
-	 * we don't pass them by unnecessarily.  The rmb()
-	 * here ensures that we see them promptly -- the
-	 * corresponding wmb()'s are in ipath_poll_urgent()
-	 * and ipath_poll_next()...
-	 */
-	rmb();
-	portr = ((istat >> dd->ipath_i_rcvavail_shift) &
-		 dd->ipath_i_rcvavail_mask) |
-		((istat >> dd->ipath_i_rcvurg_shift) &
-		 dd->ipath_i_rcvurg_mask);
-	for (i = 1; i < dd->ipath_cfgports; i++) {
-		struct ipath_portdata *pd = dd->ipath_pd[i];
-
-		if (portr & (1 << i) && pd && pd->port_cnt) {
-			if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
-					       &pd->port_flag)) {
-				clear_bit(i + dd->ipath_r_intravail_shift,
-					  &dd->ipath_rcvctrl);
-				wake_up_interruptible(&pd->port_wait);
-				rcvdint = 1;
-			} else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
-						      &pd->port_flag)) {
-				pd->port_urgent++;
-				wake_up_interruptible(&pd->port_wait);
-			}
-		}
-	}
-	if (rcvdint) {
-		/* only want to take one interrupt, so turn off the rcv
-		 * interrupt for all the ports for which we set rcv_waiting
-		 * (but never for the kernel port)
-		 */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-				 dd->ipath_rcvctrl);
-	}
-}
-
-irqreturn_t ipath_intr(int irq, void *data)
-{
-	struct ipath_devdata *dd = data;
-	u64 istat, chk0rcv = 0;
-	ipath_err_t estat = 0;
-	irqreturn_t ret;
-	static unsigned unexpected = 0;
-	u64 kportrbits;
-
-	ipath_stats.sps_ints++;
-
-	if (dd->ipath_int_counter != (u32) -1)
-		dd->ipath_int_counter++;
-
-	if (!(dd->ipath_flags & IPATH_PRESENT)) {
-		/*
-		 * This return value is not great, but we do not want the
-		 * interrupt core code to remove our interrupt handler
-		 * because we don't appear to be handling an interrupt
-		 * during a chip reset.
-		 */
-		return IRQ_HANDLED;
-	}
-
-	/*
-	 * this needs to be flags&initted, not statusp, so we keep
-	 * taking interrupts even after link goes down, etc.
-	 * Also, we *must* clear the interrupt at some point, or we won't
-	 * take it again, which can be really bad for errors, etc...
-	 */
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		ipath_bad_intr(dd, &unexpected);
-		ret = IRQ_NONE;
-		goto bail;
-	}
-
-	istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
-
-	if (unlikely(!istat)) {
-		ipath_stats.sps_nullintr++;
-		ret = IRQ_NONE; /* not our interrupt, or already handled */
-		goto bail;
-	}
-	if (unlikely(istat == -1)) {
-		ipath_bad_regread(dd);
-		/* don't know if it was our interrupt or not */
-		ret = IRQ_NONE;
-		goto bail;
-	}
-
-	if (unexpected)
-		unexpected = 0;
-
-	if (unlikely(istat & ~dd->ipath_i_bitsextant))
-		ipath_dev_err(dd,
-			      "interrupt with unknown interrupts %Lx set\n",
-			      (unsigned long long)
-			      istat & ~dd->ipath_i_bitsextant);
-	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
-		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
-			(unsigned long long) istat);
-
-	if (istat & INFINIPATH_I_ERROR) {
-		ipath_stats.sps_errints++;
-		estat = ipath_read_kreg64(dd,
-					  dd->ipath_kregs->kr_errorstatus);
-		if (!estat)
-			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
-				 "but no error bits set!\n",
-				 (unsigned long long) istat);
-		else if (estat == -1LL)
-			/*
-			 * should we try clearing all, or hope next read
-			 * works?
-			 */
-			ipath_dev_err(dd, "Read of error status failed "
-				      "(all bits set); ignoring\n");
-		else
-			chk0rcv |= handle_errors(dd, estat);
-	}
-
-	if (istat & INFINIPATH_I_GPIO) {
-		/*
-		 * GPIO interrupts fall in two broad classes:
-		 * GPIO_2 indicates (on some HT4xx boards) that a packet
-		 *        has arrived for Port 0. Checking for this
-		 *        is controlled by flag IPATH_GPIO_INTR.
-		 * GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
-		 *        errors that we need to count. Checking for this
-		 *        is controlled by flag IPATH_GPIO_ERRINTRS.
-		 */
-		u32 gpiostatus;
-		u32 to_clear = 0;
-
-		gpiostatus = ipath_read_kreg32(
-			dd, dd->ipath_kregs->kr_gpio_status);
-		/* First the error-counter case. */
-		if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
-		    (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
-			/* want to clear the bits we see asserted. */
-			to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);
-
-			/*
-			 * Count appropriately, clear bits out of our copy,
-			 * as they have been "handled".
-			 */
-			if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
-				ipath_dbg("FlowCtl on UnsupVL\n");
-				dd->ipath_rxfc_unsupvl_errs++;
-			}
-			if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
-				ipath_dbg("Overrun Threshold exceeded\n");
-				dd->ipath_overrun_thresh_errs++;
-			}
-			if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
-				ipath_dbg("Local Link Integrity error\n");
-				dd->ipath_lli_errs++;
-			}
-			gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
-		}
-		/* Now the Port0 Receive case */
-		if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
-		    (dd->ipath_flags & IPATH_GPIO_INTR)) {
-			/*
-			 * GPIO status bit 2 is set, and we expected it.
-			 * Clear it and note it via chk0rcv.
-			 * This probably only happens if a Port0 pkt
-			 * arrives at _just_ the wrong time, and we
-			 * handle that by setting chk0rcv;
-			 */
-			to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
-			gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
-			chk0rcv = 1;
-		}
-		if (gpiostatus) {
-			/*
-			 * Some unexpected bits remain. If they could have
-			 * caused the interrupt, complain and clear.
-			 * To avoid repetition of this condition, also clear
-			 * the mask. It is almost certainly due to error.
-			 */
-			const u32 mask = (u32) dd->ipath_gpio_mask;
-
-			if (mask & gpiostatus) {
-				ipath_dbg("Unexpected GPIO IRQ bits %x\n",
-				  gpiostatus & mask);
-				to_clear |= (gpiostatus & mask);
-				dd->ipath_gpio_mask &= ~(gpiostatus & mask);
-				ipath_write_kreg(dd,
-					dd->ipath_kregs->kr_gpio_mask,
-					dd->ipath_gpio_mask);
-			}
-		}
-		if (to_clear) {
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
-					(u64) to_clear);
-		}
-	}
-
-	/*
-	 * Clear the interrupt bits we found set, unless they are receive
-	 * related, in which case we already cleared them above, and don't
-	 * want to clear them again, because we might lose an interrupt.
-	 * Clear it early, so we "know" the chip will have seen this by
-	 * the time we process the queue, and will re-interrupt if necessary.
-	 * The processor itself won't take the interrupt again until we return.
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
-
-	/*
-	 * Handle kernel receive queues before checking for pio buffers
-	 * available since receives can overflow; piobuf waiters can afford
-	 * a few extra cycles, since they were waiting anyway, and user's
-	 * waiting for receive are at the bottom.
-	 */
-	kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
-		(1ULL << dd->ipath_i_rcvurg_shift);
-	if (chk0rcv || (istat & kportrbits)) {
-		istat &= ~kportrbits;
-		ipath_kreceive(dd->ipath_pd[0]);
-	}
-
-	if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
-		     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
-		handle_urcv(dd, istat);
-
-	if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
-		handle_sdma_intr(dd, istat);
-
-	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-		dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-				 dd->ipath_sendctrl);
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-		/* always process; sdma verbs uses PIO for acks and VL15  */
-		handle_layer_pioavail(dd);
-	}
-
-	ret = IRQ_HANDLED;
-
-bail:
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_kernel.h b/drivers/staging/rdma/ipath/ipath_kernel.h
deleted file mode 100644
index 66c934a..0000000
--- a/drivers/staging/rdma/ipath/ipath_kernel.h
+++ /dev/null
@@ -1,1374 +0,0 @@
-#ifndef _IPATH_KERNEL_H
-#define _IPATH_KERNEL_H
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This header file is the base header file for infinipath kernel code
- * ipath_user.h serves a similar purpose for user code.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <asm/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_common.h"
-#include "ipath_debug.h"
-#include "ipath_registers.h"
-
-/* only s/w major version of InfiniPath we can handle */
-#define IPATH_CHIP_VERS_MAJ 2U
-
-/* don't care about this except printing */
-#define IPATH_CHIP_VERS_MIN 0U
-
-/* temporary, maybe always */
-extern struct infinipath_stats ipath_stats;
-
-#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
-/*
- * First-cut criterion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
-/*
- * Struct used to indicate which errors are logged in each of the
- * error-counters that are logged to EEPROM. A counter is incremented
- * _once_ (saturating at 255) for each event with any bits set in
- * the error or hwerror register masks below.
- */
-#define IPATH_EEP_LOG_CNT (4)
-struct ipath_eep_log_mask {
-	u64 errs_to_log;
-	u64 hwerrs_to_log;
-};
-
-struct ipath_portdata {
-	void **port_rcvegrbuf;
-	dma_addr_t *port_rcvegrbuf_phys;
-	/* rcvhdrq base, needs mmap before useful */
-	void *port_rcvhdrq;
-	/* kernel virtual address where hdrqtail is updated */
-	void *port_rcvhdrtail_kvaddr;
-	/*
-	 * temp buffer for expected send setup, allocated at open, instead
-	 * of each setup call
-	 */
-	void *port_tid_pg_list;
-	/* when waiting for rcv or pioavail */
-	wait_queue_head_t port_wait;
-	/*
-	 * rcvegr bufs base, physical; must fit
-	 * in 44 bits (so that mmap64 from 32 bit programs works)
-	 */
-	dma_addr_t port_rcvegr_phys;
-	/* mmap of hdrq, must fit in 44 bits */
-	dma_addr_t port_rcvhdrq_phys;
-	dma_addr_t port_rcvhdrqtailaddr_phys;
-	/*
-	 * number of opens (including slave subports) on this instance
-	 * (ignoring forks, dup, etc. for now)
-	 */
-	int port_cnt;
-	/*
-	 * how much space to leave at start of eager TID entries for
-	 * protocol use, on each TID
-	 */
-	/* this port's number, stored here instead of calculating it */
-	unsigned port_port;
-	/* non-zero if port is being shared. */
-	u16 port_subport_cnt;
-	/* the subport id of this process, if the port is being shared. */
-	u16 port_subport_id;
-	/* number of pio bufs for this port (all procs, if shared) */
-	u32 port_piocnt;
-	/* first pio buffer for this port */
-	u32 port_pio_base;
-	/* chip offset of PIO buffers for this port */
-	u32 port_piobufs;
-	/* how many alloc_pages() chunks in port_rcvegrbuf_pages */
-	u32 port_rcvegrbuf_chunks;
-	/* how many egrbufs per chunk */
-	u32 port_rcvegrbufs_perchunk;
-	/* order for port_rcvegrbuf_pages */
-	size_t port_rcvegrbuf_size;
-	/* rcvhdrq size (for freeing) */
-	size_t port_rcvhdrq_size;
-	/* next expected TID to check when looking for free */
-	u32 port_tidcursor;
-	/* per-port status flags (IPATH_PORT_WAITING_RCV, etc.) */
-	unsigned long port_flag;
-	/* what happened */
-	unsigned long int_flag;
-	/* WAIT_RCV that timed out, no interrupt */
-	u32 port_rcvwait_to;
-	/* WAIT_PIO that timed out, no interrupt */
-	u32 port_piowait_to;
-	/* WAIT_RCV already happened, no wait */
-	u32 port_rcvnowait;
-	/* WAIT_PIO already happened, no wait */
-	u32 port_pionowait;
-	/* total number of rcvhdrqfull errors */
-	u32 port_hdrqfull;
-	/*
-	 * Used to suppress multiple instances of same
-	 * port staying stuck at same point.
-	 */
-	u32 port_lastrcvhdrqtail;
-	/* saved total number of rcvhdrqfull errors for poll edge trigger */
-	u32 port_hdrqfull_poll;
-	/* total number of polled urgent packets */
-	u32 port_urgent;
-	/* saved total number of polled urgent packets for poll edge trigger */
-	u32 port_urgent_poll;
-	/* pid of process using this port */
-	struct pid *port_pid;
-	struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
-	/* same size as task_struct .comm[] */
-	char port_comm[TASK_COMM_LEN];
-	/* pkeys set by this use of this port */
-	u16 port_pkeys[4];
-	/* so file ops can get at unit */
-	struct ipath_devdata *port_dd;
-	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
-	void *subport_uregbase;
-	/* An array of pages for the eager receive buffers * N */
-	void *subport_rcvegrbuf;
-	/* An array of pages for the eager header queue entries * N */
-	void *subport_rcvhdr_base;
-	/* The version of the library which opened this port */
-	u32 userversion;
-	/* Bitmask of active slaves */
-	u32 active_slaves;
-	/* Type of packets or conditions we want to poll for */
-	u16 poll_type;
-	/* port rcvhdrq head offset */
-	u32 port_head;
-	/* receive packet sequence counter */
-	u32 port_seq_cnt;
-};
-
-struct sk_buff;
-struct ipath_sge_state;
-struct ipath_verbs_txreq;
-
-/*
- * control information for layered drivers
- */
-struct _ipath_layer {
-	void *l_arg;
-};
-
-struct ipath_skbinfo {
-	struct sk_buff *skb;
-	dma_addr_t phys;
-};
-
-struct ipath_sdma_txreq {
-	int                 flags;
-	int                 sg_count;
-	union {
-		struct scatterlist *sg;
-		void *map_addr;
-	};
-	void              (*callback)(void *, int);
-	void               *callback_cookie;
-	int                 callback_status;
-	u16                 start_idx;  /* sdma private */
-	u16                 next_descq_idx;  /* sdma private */
-	struct list_head    list;       /* sdma private */
-};
-
-struct ipath_sdma_desc {
-	__le64 qw[2];
-};
-
-#define IPATH_SDMA_TXREQ_F_USELARGEBUF  0x1
-#define IPATH_SDMA_TXREQ_F_HEADTOHOST   0x2
-#define IPATH_SDMA_TXREQ_F_INTREQ       0x4
-#define IPATH_SDMA_TXREQ_F_FREEBUF      0x8
-#define IPATH_SDMA_TXREQ_F_FREEDESC     0x10
-#define IPATH_SDMA_TXREQ_F_VL15         0x20
-
-#define IPATH_SDMA_TXREQ_S_OK        0
-#define IPATH_SDMA_TXREQ_S_SENDERROR 1
-#define IPATH_SDMA_TXREQ_S_ABORTED   2
-#define IPATH_SDMA_TXREQ_S_SHUTDOWN  3
-
-#define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG	(1ull << 63)
-#define IPATH_SDMA_STATUS_ABORT_IN_PROG			(1ull << 62)
-#define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE		(1ull << 61)
-#define IPATH_SDMA_STATUS_SCB_EMPTY			(1ull << 30)
-
-/* max dwords in small buffer packet */
-#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
-
-/*
- * Possible IB config parameters for ipath_f_get/set_ib_cfg()
- */
-#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
-#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
-#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
-#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
-#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
-#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
-#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
-#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
-#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
-#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
-#define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
-
-
-struct ipath_devdata {
-	struct list_head ipath_list;
-
-	struct ipath_kregs const *ipath_kregs;
-	struct ipath_cregs const *ipath_cregs;
-
-	/* mem-mapped pointer to base of chip regs */
-	u64 __iomem *ipath_kregbase;
-	/* end of mem-mapped chip space; range checking */
-	u64 __iomem *ipath_kregend;
-	/* physical address of chip for io_remap, etc. */
-	unsigned long ipath_physaddr;
-	/* base of memory alloced for ipath_kregbase, for free */
-	u64 *ipath_kregalloc;
-	/* ipath_cfgports pointers */
-	struct ipath_portdata **ipath_pd;
-	/* sk_buffs used by port 0 eager receive queue */
-	struct ipath_skbinfo *ipath_port0_skbinfo;
-	/* kvirt address of 1st 2k pio buffer */
-	void __iomem *ipath_pio2kbase;
-	/* kvirt address of 1st 4k pio buffer */
-	void __iomem *ipath_pio4kbase;
-	/*
-	 * points to area where PIOavail registers will be DMA'ed.
-	 * Has to be on a page of its own, because the page will be
-	 * mapped into user program space.  This copy is *ONLY* ever
-	 * written by DMA, not by the driver!  Need a copy per device
-	 * when we get to multiple devices
-	 */
-	volatile __le64 *ipath_pioavailregs_dma;
-	/* physical address where updates occur */
-	dma_addr_t ipath_pioavailregs_phys;
-	struct _ipath_layer ipath_layer;
-	/* setup intr */
-	int (*ipath_f_intrsetup)(struct ipath_devdata *);
-	/* fallback to alternate interrupt type if possible */
-	int (*ipath_f_intr_fallback)(struct ipath_devdata *);
-	/* setup on-chip bus config */
-	int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
-	/* hard reset chip */
-	int (*ipath_f_reset)(struct ipath_devdata *);
-	int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
-				     size_t);
-	void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
-	void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
-					size_t);
-	void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
-	int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
-	int (*ipath_f_early_init)(struct ipath_devdata *);
-	void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
-	void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
-				u32, unsigned long);
-	void (*ipath_f_tidtemplate)(struct ipath_devdata *);
-	void (*ipath_f_cleanup)(struct ipath_devdata *);
-	void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
-	/* fill out chip-specific fields */
-	int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
-	/* free irq */
-	void (*ipath_f_free_irq)(struct ipath_devdata *);
-	struct ipath_message_header *(*ipath_f_get_msgheader)
-					(struct ipath_devdata *, __le32 *);
-	void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
-	int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
-	int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
-	void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
-	void (*ipath_f_read_counters)(struct ipath_devdata *,
-					struct infinipath_counters *);
-	void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
-	/* per chip actions needed for IB Link up/down changes */
-	int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
-
-	unsigned ipath_lastegr_idx;
-	struct ipath_ibdev *verbs_dev;
-	struct timer_list verbs_timer;
-	/* total dwords sent (summed from counter) */
-	u64 ipath_sword;
-	/* total dwords rcvd (summed from counter) */
-	u64 ipath_rword;
-	/* total packets sent (summed from counter) */
-	u64 ipath_spkts;
-	/* total packets rcvd (summed from counter) */
-	u64 ipath_rpkts;
-	/* ipath_statusp initially points to this. */
-	u64 _ipath_status;
-	/* GUID for this interface, in network order */
-	__be64 ipath_guid;
-	/*
-	 * aggregate of error bits reported since last cleared, for
-	 * limiting of error reporting
-	 */
-	ipath_err_t ipath_lasterror;
-	/*
-	 * aggregate of error bits reported since last cleared, for
-	 * limiting of hwerror reporting
-	 */
-	ipath_err_t ipath_lasthwerror;
-	/* errors masked because they occur too fast */
-	ipath_err_t ipath_maskederrs;
-	u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
-	/* these 5 fields are used to establish deltas for IB Symbol
-	 * errors and linkrecovery errors. They can be reported on
-	 * some chips during link negotiation prior to INIT, and with
-	 * DDR when faking DDR negotiations with non-IBTA switches.
-	 * The chip counters are adjusted at driver unload if there is
-	 * a non-zero delta.
-	 */
-	u64 ibdeltainprog;
-	u64 ibsymdelta;
-	u64 ibsymsnap;
-	u64 iblnkerrdelta;
-	u64 iblnkerrsnap;
-
-	/* time in jiffies at which to re-enable maskederrs */
-	unsigned long ipath_unmasktime;
-	/* count of egrfull errors, combined for all ports */
-	u64 ipath_last_tidfull;
-	/* for ipath_qcheck() */
-	u64 ipath_lastport0rcv_cnt;
-	/* template for writing TIDs  */
-	u64 ipath_tidtemplate;
-	/* value to write to free TIDs */
-	u64 ipath_tidinvalid;
-	/* IBA6120 rcv interrupt setup */
-	u64 ipath_rhdrhead_intr_off;
-
-	/* size of memory at ipath_kregbase */
-	u32 ipath_kregsize;
-	/* number of registers used for pioavail */
-	u32 ipath_pioavregs;
-	/* IPATH_POLL, etc. */
-	u32 ipath_flags;
-	/* ipath_flags driver is waiting for */
-	u32 ipath_state_wanted;
-	/* last buffer for user use, first buf for kernel use is this
-	 * index. */
-	u32 ipath_lastport_piobuf;
-	/* is a stats timer active */
-	u32 ipath_stats_timer_active;
-	/* number of interrupts for this device -- saturates... */
-	u32 ipath_int_counter;
-	/* dwords sent read from counter */
-	u32 ipath_lastsword;
-	/* dwords received read from counter */
-	u32 ipath_lastrword;
-	/* sent packets read from counter */
-	u32 ipath_lastspkts;
-	/* received packets read from counter */
-	u32 ipath_lastrpkts;
-	/* pio bufs allocated per port */
-	u32 ipath_pbufsport;
-	/* if remainder on bufs/port, ports < extrabuf get 1 extra */
-	u32 ipath_ports_extrabuf;
-	u32 ipath_pioupd_thresh; /* update threshold, some chips */
-	/*
-	 * maximum number of ports configured; zero means use the number the
-	 * chip supports, fewer gives more pio bufs/port, etc.
-	 */
-	u32 ipath_cfgports;
-	/* count of port 0 hdrqfull errors */
-	u32 ipath_p0_hdrqfull;
-	/* port 0 number of receive eager buffers */
-	u32 ipath_p0_rcvegrcnt;
-
-	/*
-	 * index of last piobuffer we used.  Speeds up searching, by
-	 * starting at this point.  Doesn't matter if multiple CPUs use
-	 * and update it; the last update is the only write that matters.
-	 * Whenever it
-	 * wraps, we update shadow copies.  Need a copy per device when we
-	 * get to multiple devices
-	 */
-	u32 ipath_lastpioindex;
-	u32 ipath_lastpioindexl;
-	/* max length of freezemsg */
-	u32 ipath_freezelen;
-	/*
-	 * consecutive times we wanted a PIO buffer but were unable to
-	 * get one
-	 */
-	u32 ipath_consec_nopiobuf;
-	/*
-	 * hint that we should update ipath_pioavailshadow before
-	 * looking for a PIO buffer
-	 */
-	u32 ipath_upd_pio_shadow;
-	/* so we can rewrite it after a chip reset */
-	u32 ipath_pcibar0;
-	/* so we can rewrite it after a chip reset */
-	u32 ipath_pcibar1;
-	u32 ipath_x1_fix_tries;
-	u32 ipath_autoneg_tries;
-	u32 serdes_first_init_done;
-
-	struct ipath_relock {
-		atomic_t ipath_relock_timer_active;
-		struct timer_list ipath_relock_timer;
-		unsigned int ipath_relock_interval; /* in jiffies */
-	} ipath_relock_singleton;
-
-	/* interrupt number */
-	int ipath_irq;
-	/* HT/PCI Vendor ID (here for NodeInfo) */
-	u16 ipath_vendorid;
-	/* HT/PCI Device ID (here for NodeInfo) */
-	u16 ipath_deviceid;
-	/* offset in HT config space of slave/primary interface block */
-	u8 ipath_ht_slave_off;
-	/* for write combining settings */
-	int wc_cookie;
-	/* ref count for each pkey */
-	atomic_t ipath_pkeyrefs[4];
-	/* shadow copy of struct page *'s for exp tid pages */
-	struct page **ipath_pageshadow;
-	/* shadow copy of dma handles for exp tid pages */
-	dma_addr_t *ipath_physshadow;
-	u64 __iomem *ipath_egrtidbase;
-	/* lock to workaround chip bug 9437 and others */
-	spinlock_t ipath_kernel_tid_lock;
-	spinlock_t ipath_user_tid_lock;
-	spinlock_t ipath_sendctrl_lock;
-	/* around ipath_pd and (user ports) port_cnt use (intr vs free) */
-	spinlock_t ipath_uctxt_lock;
-
-	/*
-	 * IPATH_STATUS_*,
-	 * this address is mapped readonly into user processes so they can
-	 * get status cheaply, whenever they want.
-	 */
-	u64 *ipath_statusp;
-	/* freeze msg if hw error put chip in freeze */
-	char *ipath_freezemsg;
-	/* pci access data structure */
-	struct pci_dev *pcidev;
-	struct cdev *user_cdev;
-	struct cdev *diag_cdev;
-	struct device *user_dev;
-	struct device *diag_dev;
-	/* timer used to prevent stats overflow, error throttling, etc. */
-	struct timer_list ipath_stats_timer;
-	/* timer to verify interrupts work, and fallback if possible */
-	struct timer_list ipath_intrchk_timer;
-	void *ipath_dummy_hdrq;	/* used after port close */
-	dma_addr_t ipath_dummy_hdrq_phys;
-
-	/* SendDMA related entries */
-	spinlock_t            ipath_sdma_lock;
-	unsigned long         ipath_sdma_status;
-	unsigned long         ipath_sdma_abort_jiffies;
-	unsigned long         ipath_sdma_abort_intr_timeout;
-	unsigned long         ipath_sdma_buf_jiffies;
-	struct ipath_sdma_desc *ipath_sdma_descq;
-	u64		      ipath_sdma_descq_added;
-	u64		      ipath_sdma_descq_removed;
-	int		      ipath_sdma_desc_nreserved;
-	u16                   ipath_sdma_descq_cnt;
-	u16                   ipath_sdma_descq_tail;
-	u16                   ipath_sdma_descq_head;
-	u16                   ipath_sdma_next_intr;
-	u16                   ipath_sdma_reset_wait;
-	u8                    ipath_sdma_generation;
-	struct tasklet_struct ipath_sdma_abort_task;
-	struct tasklet_struct ipath_sdma_notify_task;
-	struct list_head      ipath_sdma_activelist;
-	struct list_head      ipath_sdma_notifylist;
-	atomic_t              ipath_sdma_vl15_count;
-	struct timer_list     ipath_sdma_vl15_timer;
-
-	dma_addr_t       ipath_sdma_descq_phys;
-	volatile __le64 *ipath_sdma_head_dma;
-	dma_addr_t       ipath_sdma_head_phys;
-
-	unsigned long ipath_ureg_align; /* user register alignment */
-
-	struct delayed_work ipath_autoneg_work;
-	wait_queue_head_t ipath_autoneg_wait;
-
-	/* HoL blocking / user app forward-progress state */
-	unsigned          ipath_hol_state;
-	unsigned          ipath_hol_next;
-	struct timer_list ipath_hol_timer;
-
-	/*
-	 * Shadow copies of registers; size indicates read access size.
-	 * Most of them are readonly, but some are write-only registers,
-	 * where we manipulate the bits in the shadow copy, and then write
-	 * the shadow copy to infinipath.
-	 *
-	 * We deliberately make most of these 32 bits, since they have
-	 * restricted range.  For any that we read, we want to generate 32
-	 * bit accesses, since Opteron will generate 2 separate 32 bit HT
-	 * transactions for a 64 bit read, and we want to avoid unnecessary
-	 * HT transactions.
-	 */
-
-	/* This is the 64 bit group */
-
-	/*
-	 * shadow of pioavail, check to be sure it's large enough at
-	 * init time.
-	 */
-	unsigned long ipath_pioavailshadow[8];
-	/* bitmap of send buffers available for the kernel to use with PIO. */
-	unsigned long ipath_pioavailkernel[8];
-	/* shadow of kr_gpio_out, for rmw ops */
-	u64 ipath_gpio_out;
-	/* shadow the gpio mask register */
-	u64 ipath_gpio_mask;
-	/* shadow the gpio output enable, etc... */
-	u64 ipath_extctrl;
-	/* kr_revision shadow */
-	u64 ipath_revision;
-	/*
-	 * shadow of ibcctrl, for interrupt handling of link changes,
-	 * etc.
-	 */
-	u64 ipath_ibcctrl;
-	/*
-	 * last ibcstatus, to suppress "duplicate" status change messages,
-	 * mostly from 2 to 3
-	 */
-	u64 ipath_lastibcstat;
-	/* hwerrmask shadow */
-	ipath_err_t ipath_hwerrmask;
-	ipath_err_t ipath_errormask; /* errormask shadow */
-	/* interrupt config reg shadow */
-	u64 ipath_intconfig;
-	/* kr_sendpiobufbase value */
-	u64 ipath_piobufbase;
-	/* kr_ibcddrctrl shadow */
-	u64 ipath_ibcddrctrl;
-
-	/* these are the "32 bit" regs */
-
-	/*
-	 * number of GUIDs in the flash for this interface; may need some
-	 * rethinking for setting on other ifaces
-	 */
-	u32 ipath_nguid;
-	/*
-	 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
-	 * all expect bit fields to be "unsigned long"
-	 */
-	/* shadow kr_rcvctrl */
-	unsigned long ipath_rcvctrl;
-	/* shadow kr_sendctrl */
-	unsigned long ipath_sendctrl;
-	/* to not count armlaunch after cancel */
-	unsigned long ipath_lastcancel;
-	/* count cases where special trigger was needed (double write) */
-	unsigned long ipath_spectriggerhit;
-
-	/* value we put in kr_rcvhdrcnt */
-	u32 ipath_rcvhdrcnt;
-	/* value we put in kr_rcvhdrsize */
-	u32 ipath_rcvhdrsize;
-	/* value we put in kr_rcvhdrentsize */
-	u32 ipath_rcvhdrentsize;
-	/* offset of last entry in rcvhdrq */
-	u32 ipath_hdrqlast;
-	/* kr_portcnt value */
-	u32 ipath_portcnt;
-	/* kr_pagealign value */
-	u32 ipath_palign;
-	/* number of "2KB" PIO buffers */
-	u32 ipath_piobcnt2k;
-	/* size in bytes of "2KB" PIO buffers */
-	u32 ipath_piosize2k;
-	/* number of "4KB" PIO buffers */
-	u32 ipath_piobcnt4k;
-	/* size in bytes of "4KB" PIO buffers */
-	u32 ipath_piosize4k;
-	u32 ipath_pioreserved; /* PIO buffers reserved for special in-kernel use */
-	/* kr_rcvegrbase value */
-	u32 ipath_rcvegrbase;
-	/* kr_rcvegrcnt value */
-	u32 ipath_rcvegrcnt;
-	/* kr_rcvtidbase value */
-	u32 ipath_rcvtidbase;
-	/* kr_rcvtidcnt value */
-	u32 ipath_rcvtidcnt;
-	/* kr_sendregbase */
-	u32 ipath_sregbase;
-	/* kr_userregbase */
-	u32 ipath_uregbase;
-	/* kr_counterregbase */
-	u32 ipath_cregbase;
-	/* shadow the control register contents */
-	u32 ipath_control;
-	/* PCI revision register (HTC rev on FPGA) */
-	u32 ipath_pcirev;
-
-	/* chip address space used by 4k pio buffers */
-	u32 ipath_4kalign;
-	/* The MTU programmed for this unit */
-	u32 ipath_ibmtu;
-	/*
-	 * The max size IB packet, including IB headers, that we can send.
-	 * Starts same as ipath_piosize, but is affected when ibmtu is
-	 * changed, or by size of eager buffers
-	 */
-	u32 ipath_ibmaxlen;
-	/*
-	 * ibmaxlen at init time, limited by chip and by receive buffer
-	 * size.  Not changed after init.
-	 */
-	u32 ipath_init_ibmaxlen;
-	/* size of each rcvegrbuffer */
-	u32 ipath_rcvegrbufsize;
-	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
-	u32 ipath_lbus_width;
-	/* localbus speed (HT: 200, 400, 800, 1000; PCIe: 2500) */
-	u32 ipath_lbus_speed;
-	/*
-	 * number of sequential ibcstatus changes for polling active/quiet
-	 * (i.e., link not coming up).
-	 */
-	u32 ipath_ibpollcnt;
-	/* low and high portions of MSI capability/vector */
-	u32 ipath_msi_lo;
-	/* saved after PCIe init for restore after reset */
-	u32 ipath_msi_hi;
-	/* MSI data (vector) saved for restore */
-	u16 ipath_msi_data;
-	/* MLID programmed for this instance */
-	u16 ipath_mlid;
-	/* LID programmed for this instance */
-	u16 ipath_lid;
-	/* list of pkeys programmed; 0 if not set */
-	u16 ipath_pkeys[4];
-	/*
-	 * ASCII serial number, from flash, large enough for original
-	 * all digit strings, and longer QLogic serial number format
-	 */
-	u8 ipath_serial[16];
-	/* human readable board version */
-	u8 ipath_boardversion[96];
-	u8 ipath_lbus_info[32]; /* human readable localbus info */
-	/* chip major rev, from ipath_revision */
-	u8 ipath_majrev;
-	/* chip minor rev, from ipath_revision */
-	u8 ipath_minrev;
-	/* board rev, from ipath_revision */
-	u8 ipath_boardrev;
-	/* saved for restore after reset */
-	u8 ipath_pci_cacheline;
-	/* LID mask control */
-	u8 ipath_lmc;
-	/* link width supported */
-	u8 ipath_link_width_supported;
-	/* link speed supported */
-	u8 ipath_link_speed_supported;
-	u8 ipath_link_width_enabled;
-	u8 ipath_link_speed_enabled;
-	u8 ipath_link_width_active;
-	u8 ipath_link_speed_active;
-	/* Rx Polarity inversion (compensate for ~tx on partner) */
-	u8 ipath_rx_pol_inv;
-
-	u8 ipath_r_portenable_shift;
-	u8 ipath_r_intravail_shift;
-	u8 ipath_r_tailupd_shift;
-	u8 ipath_r_portcfg_shift;
-
-	/* unit # of this chip, if present */
-	int ipath_unit;
-
-	/* local link integrity counter */
-	u32 ipath_lli_counter;
-	/* local link integrity errors */
-	u32 ipath_lli_errors;
-	/*
-	 * Above counts only cases where _successive_ LocalLinkIntegrity
-	 * errors were seen in the receive headers of kern-packets.
-	 * Below are the three (monotonically increasing) counters
-	 * maintained via GPIO interrupts on iba6120-rev2.
-	 */
-	u32 ipath_rxfc_unsupvl_errs;
-	u32 ipath_overrun_thresh_errs;
-	u32 ipath_lli_errs;
-
-	/*
-	 * Not all devices managed by a driver instance are the same
-	 * type, so these fields must be per-device.
-	 */
-	u64 ipath_i_bitsextant;
-	ipath_err_t ipath_e_bitsextant;
-	ipath_err_t ipath_hwe_bitsextant;
-
-	/*
-	 * Below should be computable from number of ports,
-	 * since they are never modified.
-	 */
-	u64 ipath_i_rcvavail_mask;
-	u64 ipath_i_rcvurg_mask;
-	u16 ipath_i_rcvurg_shift;
-	u16 ipath_i_rcvavail_shift;
-
-	/*
-	 * Register bits for selecting i2c direction and values, used for
-	 * I2C serial flash.
-	 */
-	u8 ipath_gpio_sda_num;
-	u8 ipath_gpio_scl_num;
-	u8 ipath_i2c_chain_type;
-	u64 ipath_gpio_sda;
-	u64 ipath_gpio_scl;
-
-	/* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
-	spinlock_t ipath_gpio_lock;
-
-	/*
-	 * IB link and linktraining states and masks that vary per chip in
-	 * some way.  Set at init, to avoid each IB status change interrupt
-	 */
-	u8 ibcs_ls_shift;
-	u8 ibcs_lts_mask;
-	u32 ibcs_mask;
-	u32 ib_init;
-	u32 ib_arm;
-	u32 ib_active;
-
-	u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
-
-	/*
-	 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontol
-	 * reg. Changes for IBA7220
-	 */
-	u8 ibcc_lic_mask; /* LinkInitCmd */
-	u8 ibcc_lc_shift; /* LinkCmd */
-	u8 ibcc_mpl_shift; /* Maxpktlen */
-
-	u8 delay_mult;
-
-	/* used to override LED behavior */
-	u8 ipath_led_override;  /* Substituted for normal value, if non-zero */
-	u16 ipath_led_override_timeoff; /* delta to next timer event */
-	u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
-	u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
-	atomic_t ipath_led_override_timer_active;
-	/* Used to flash LEDs in override mode */
-	struct timer_list ipath_led_override_timer;
-
-	/* Support (including locks) for EEPROM logging of errors and time */
-	/* control access to actual counters, timer */
-	spinlock_t ipath_eep_st_lock;
-	/* control high-level access to EEPROM */
-	struct mutex ipath_eep_lock;
-	/* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
-	uint64_t ipath_traffic_wds;
-	/* active time is kept in seconds, but logged in hours */
-	atomic_t ipath_active_time;
-	/* Below are nominal shadow of EEPROM, new since last EEPROM update */
-	uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
-	uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
-	uint16_t ipath_eep_hrs;
-	/*
-	 * masks for which bits of errs, hwerrs that cause
-	 * each of the counters to increment.
-	 */
-	struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
-
-	/* interrupt mitigation reload register info */
-	u16 ipath_jint_idle_ticks;	/* idle clock ticks */
-	u16 ipath_jint_max_packets;	/* max packets across all ports */
-
-	/*
-	 * lock for access to SerDes, and flags to sequence preset
-	 * versus steady-state. 7220-only at the moment.
-	 */
-	spinlock_t ipath_sdepb_lock;
-	u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
-};
-
-/* ipath_hol_state values (stopping/starting user proc, send flushing) */
-#define IPATH_HOL_UP       0
-#define IPATH_HOL_DOWN     1
-/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
-#define IPATH_HOL_DOWNSTOP 0
-#define IPATH_HOL_DOWNCONT 1
-
-/* bit positions for sdma_status */
-#define IPATH_SDMA_ABORTING  0
-#define IPATH_SDMA_DISARMED  1
-#define IPATH_SDMA_DISABLED  2
-#define IPATH_SDMA_LAYERBUF  3
-#define IPATH_SDMA_RUNNING  30
-#define IPATH_SDMA_SHUTDOWN 31
-
-/* bit combinations that correspond to abort states */
-#define IPATH_SDMA_ABORT_NONE 0
-#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
-#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
-	(1UL << IPATH_SDMA_DISARMED))
-#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
-	(1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
-	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
-	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
-
-#define IPATH_SDMA_BUF_NONE 0
-#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
-
-/* Private data for file operations */
-struct ipath_filedata {
-	struct ipath_portdata *pd;
-	unsigned subport;
-	unsigned tidcursor;
-	struct ipath_user_sdma_queue *pq;
-};
-extern struct list_head ipath_dev_list;
-extern spinlock_t ipath_devs_lock;
-extern struct ipath_devdata *ipath_lookup(int unit);
-
-int ipath_init_chip(struct ipath_devdata *, int);
-int ipath_enable_wc(struct ipath_devdata *dd);
-void ipath_disable_wc(struct ipath_devdata *dd);
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
-void ipath_shutdown_device(struct ipath_devdata *);
-void ipath_clear_freeze(struct ipath_devdata *);
-
-struct file_operations;
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
-		    struct cdev **cdevp, struct device **devp);
-void ipath_cdev_cleanup(struct cdev **cdevp,
-			struct device **devp);
-
-int ipath_diag_add(struct ipath_devdata *);
-void ipath_diag_remove(struct ipath_devdata *);
-
-extern wait_queue_head_t ipath_state_wait;
-
-int ipath_user_add(struct ipath_devdata *dd);
-void ipath_user_remove(struct ipath_devdata *dd);
-
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
-
-extern int ipath_diag_inuse;
-
-irqreturn_t ipath_intr(int irq, void *devid);
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
-		     ipath_err_t err);
-#if __IPATH_INFO || __IPATH_DBG
-extern const char *ipath_ibcstatus_str[];
-#endif
-
-/* clean up any per-chip chip-specific stuff */
-void ipath_chip_cleanup(struct ipath_devdata *);
-/* clean up any chip type-specific stuff */
-void ipath_chip_done(void);
-
-void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
-			  unsigned cnt);
-void ipath_cancel_sends(struct ipath_devdata *, int);
-
-int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
-void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
-
-int ipath_parse_ushort(const char *str, unsigned short *valp);
-
-void ipath_kreceive(struct ipath_portdata *);
-int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
-int ipath_reset_device(int);
-void ipath_get_faststats(unsigned long);
-int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
-int ipath_set_linkstate(struct ipath_devdata *, u8);
-int ipath_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_lid(struct ipath_devdata *, u32, u8);
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
-void ipath_enable_armlaunch(struct ipath_devdata *);
-void ipath_disable_armlaunch(struct ipath_devdata *);
-void ipath_hol_down(struct ipath_devdata *);
-void ipath_hol_up(struct ipath_devdata *);
-void ipath_hol_event(unsigned long);
-void ipath_toggle_rclkrls(struct ipath_devdata *);
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
-void ipath_set_relock_poll(struct ipath_devdata *, int);
-void ipath_shutdown_relock_poll(struct ipath_devdata *);
-
-/* for use in system calls, where we want to know device type, etc. */
-#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
-#define subport_fp(fp) \
-	((struct ipath_filedata *)(fp)->private_data)->subport
-#define tidcursor_fp(fp) \
-	((struct ipath_filedata *)(fp)->private_data)->tidcursor
-#define user_sdma_queue_fp(fp) \
-	((struct ipath_filedata *)(fp)->private_data)->pq
-
-/*
- * values for ipath_flags
- */
-		/* chip can report link latency (IB 1.2) */
-#define IPATH_HAS_LINK_LATENCY 0x1
-		/* The chip is up and initted */
-#define IPATH_INITTED       0x2
-		/* set if any user code has set kr_rcvhdrsize */
-#define IPATH_RCVHDRSZ_SET  0x4
-		/* The chip is present and valid for accesses */
-#define IPATH_PRESENT       0x8
-		/* HT link0 is only 8 bits wide, ignore upper byte crc
-		 * errors, etc. */
-#define IPATH_8BIT_IN_HT0   0x10
-		/* HT link1 is only 8 bits wide, ignore upper byte crc
-		 * errors, etc. */
-#define IPATH_8BIT_IN_HT1   0x20
-		/* The link is down */
-#define IPATH_LINKDOWN      0x40
-		/* The link level is up (0x11) */
-#define IPATH_LINKINIT      0x80
-		/* The link is in the armed (0x21) state */
-#define IPATH_LINKARMED     0x100
-		/* The link is in the active (0x31) state */
-#define IPATH_LINKACTIVE    0x200
-		/* link current state is unknown */
-#define IPATH_LINKUNK       0x400
-		/* Write combining flush needed for PIO */
-#define IPATH_PIO_FLUSH_WC  0x1000
-		/* receive tail is not DMA'd; derived from RHF sequence numbers */
-#define IPATH_NODMA_RTAIL   0x2000
-		/* no IB cable, or no device on IB cable */
-#define IPATH_NOCABLE       0x4000
-		/* Supports port zero per packet receive interrupts via
-		 * GPIO */
-#define IPATH_GPIO_INTR     0x8000
-		/* uses the coded 4byte TID, not 8 byte */
-#define IPATH_4BYTE_TID     0x10000
-		/* packet/word counters are 32 bit, else those 4 counters
-		 * are 64bit */
-#define IPATH_32BITCOUNTERS 0x20000
-		/* Interrupt register is 64 bits */
-#define IPATH_INTREG_64     0x40000
-		/* can miss port0 rx interrupts */
-#define IPATH_DISABLED      0x80000 /* administratively disabled */
-		/* Use GPIO interrupts for new counters */
-#define IPATH_GPIO_ERRINTRS 0x100000
-#define IPATH_SWAP_PIOBUFS  0x200000
-		/* Supports Send DMA */
-#define IPATH_HAS_SEND_DMA  0x400000
-		/* Supports Send Count (not just word count) in PBC */
-#define IPATH_HAS_PBC_CNT   0x800000
-		/* Suppress heartbeat, even if turning off loopback */
-#define IPATH_NO_HRTBT      0x1000000
-#define IPATH_HAS_THRESH_UPDATE 0x4000000
-#define IPATH_HAS_MULT_IB_SPEED 0x8000000
-#define IPATH_IB_AUTONEG_INPROG 0x10000000
-#define IPATH_IB_AUTONEG_FAILED 0x20000000
-		/* Linkdown-disable intentionally, Do not attempt to bring up */
-#define IPATH_IB_LINK_DISABLED 0x40000000
-#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
-
-/* Bits in GPIO for the added interrupts */
-#define IPATH_GPIO_PORT0_BIT 2
-#define IPATH_GPIO_RXUVL_BIT 3
-#define IPATH_GPIO_OVRUN_BIT 4
-#define IPATH_GPIO_LLI_BIT 5
-#define IPATH_GPIO_ERRINTR_MASK 0x38
-
-/* portdata flag bit offsets */
-		/* waiting for a packet to arrive */
-#define IPATH_PORT_WAITING_RCV   2
-		/* master has not finished initializing */
-#define IPATH_PORT_MASTER_UNINIT 4
-		/* waiting for an urgent packet to arrive */
-#define IPATH_PORT_WAITING_URG 5
-
-/* free up any allocated data at closes */
-void ipath_free_data(struct ipath_portdata *dd);
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
-				unsigned len, int avail);
-void ipath_init_iba6110_funcs(struct ipath_devdata *);
-void ipath_get_eeprom_info(struct ipath_devdata *);
-int ipath_update_eeprom_log(struct ipath_devdata *dd);
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
-u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
-void ipath_disarm_senderrbufs(struct ipath_devdata *);
-void ipath_force_pio_avail_update(struct ipath_devdata *);
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
-
-/*
- * Set LED override, only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define IPATH_LED_LOG 2  /* Logical (link) YELLOW LED */
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
-
-/* send dma routines */
-int setup_sdma(struct ipath_devdata *);
-void teardown_sdma(struct ipath_devdata *);
-void ipath_restart_sdma(struct ipath_devdata *);
-void ipath_sdma_intr(struct ipath_devdata *);
-int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
-			  u32, struct ipath_verbs_txreq *);
-/* ipath_sdma_lock should be locked before calling this. */
-int ipath_sdma_make_progress(struct ipath_devdata *dd);
-
-/* must be called under ipath_sdma_lock */
-static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
-{
-	return dd->ipath_sdma_descq_cnt -
-		(dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
-		1 - dd->ipath_sdma_desc_nreserved;
-}
-
-static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
-{
-	dd->ipath_sdma_desc_nreserved += cnt;
-}
-
-static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
-{
-	dd->ipath_sdma_desc_nreserved -= cnt;
-}
-
-/*
- * number of words used for protocol header if not set by ipath_userinit();
- */
-#define IPATH_DFLT_RCVHDRSIZE 9
-
-int ipath_get_user_pages(unsigned long, size_t, struct page **);
-void ipath_release_user_pages(struct page **, size_t);
-void ipath_release_user_pages_on_close(struct page **, size_t);
-int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
-int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
-int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
-int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
-
-/* these are used for the registers that vary with port */
-void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
-			   unsigned, u64);
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner.  It also gives us some error
- * checking.  64-bit register reads should always work, but are inefficient
- * on Opteron (the northbridge always generates 2 separate HT 32-bit reads),
- * so we use kreg32 wherever possible.  User register and counter register
- * reads are always 32-bit reads, so there is only one form of those routines.
- */
-
-/*
- * At the moment, none of the s-registers are writable, so no
- * ipath_write_sreg().
- */
-
-/**
- * ipath_read_ureg32 - read 32-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @port: port number
- *
- * Return the contents of a register that is virtualized to be per port.
- * Returns 0 if the chip is not accessible (not distinguishable from valid
- * contents at runtime; we may add a separate error variable at some point).
- */
-static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
-				    ipath_ureg regno, int port)
-{
-	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-		return 0;
-
-	return readl(regno + (u64 __iomem *)
-		     (dd->ipath_uregbase +
-		      (char __iomem *)dd->ipath_kregbase +
-		      dd->ipath_ureg_align * port));
-}
-
-/**
- * ipath_write_ureg - write a 64-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @value: value
- * @port: port
- *
- * Write the contents of a register that is virtualized to be per port.
- */
-static inline void ipath_write_ureg(const struct ipath_devdata *dd,
-				    ipath_ureg regno, u64 value, int port)
-{
-	u64 __iomem *ubase = (u64 __iomem *)
-		(dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
-		 dd->ipath_ureg_align * port);
-	if (dd->ipath_kregbase)
-		writeq(value, &ubase[regno]);
-}
-
-static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
-				    ipath_kreg regno)
-{
-	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-		return -1;
-	return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
-				    ipath_kreg regno)
-{
-	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-		return -1;
-
-	return readq(&dd->ipath_kregbase[regno]);
-}
-
-static inline void ipath_write_kreg(const struct ipath_devdata *dd,
-				    ipath_kreg regno, u64 value)
-{
-	if (dd->ipath_kregbase)
-		writeq(value, &dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
-				  ipath_creg regno)
-{
-	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-		return 0;
-
-	return readq(regno + (u64 __iomem *)
-		     (dd->ipath_cregbase +
-		      (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
-					 ipath_creg regno)
-{
-	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-		return 0;
-	return readl(regno + (u64 __iomem *)
-		     (dd->ipath_cregbase +
-		      (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_write_creg(const struct ipath_devdata *dd,
-				    ipath_creg regno, u64 value)
-{
-	if (dd->ipath_kregbase)
-		writeq(value, regno + (u64 __iomem *)
-		       (dd->ipath_cregbase +
-			(char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
-{
-	*((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
-{
-	return (u32) le64_to_cpu(*((volatile __le64 *)
-				pd->port_rcvhdrtail_kvaddr));
-}
-
-static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
-{
-	const struct ipath_devdata *dd = pd->port_dd;
-	u32 hdrqtail;
-
-	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-		__le32 *rhf_addr;
-		u32 seq;
-
-		rhf_addr = (__le32 *) pd->port_rcvhdrq +
-			pd->port_head + dd->ipath_rhf_offset;
-		seq = ipath_hdrget_seq(rhf_addr);
-		hdrqtail = pd->port_head;
-		if (seq == pd->port_seq_cnt)
-			hdrqtail++;
-	} else
-		hdrqtail = ipath_get_rcvhdrtail(pd);
-
-	return hdrqtail;
-}
-
-static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
-{
-	return (dd->ipath_flags & IPATH_INTREG_64) ?
-		ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
-}
-
-/*
- * From the contents of IBCStatus (or a saved copy), return the link state.
- * ACTIVE_DEFER is reported as ACTIVE, because we treat the two the same
- * everywhere (and should, for almost all purposes).
- */
-static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
-	u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
-		INFINIPATH_IBCS_LINKSTATE_MASK;
-	if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
-		state = INFINIPATH_IBCS_L_STATE_ACTIVE;
-	return state;
-}
-
-/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
-static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
-{
-	return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-		dd->ibcs_lts_mask;
-}
-
-/*
- * From the contents of IBCStatus (or a saved copy), return the logical link
- * state: a combination of the link state and the link training state (down,
- * active, init, arm, etc.).
- */
-static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
-{
-	u32 ibs;
-	ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-		dd->ibcs_lts_mask;
-	ibs |= (u32)(ibcs &
-		(INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
-	return ibs;
-}
-
-/*
- * sysfs interface.
- */
-
-struct device_driver;
-
-extern const char ib_ipath_version[];
-
-extern const struct attribute_group *ipath_driver_attr_groups[];
-
-int ipath_device_create_group(struct device *, struct ipath_devdata *);
-void ipath_device_remove_group(struct device *, struct ipath_devdata *);
-int ipath_expose_reset(struct device *);
-
-int ipath_init_ipathfs(void);
-void ipath_exit_ipathfs(void);
-int ipathfs_add_device(struct ipath_devdata *);
-int ipathfs_remove_device(struct ipath_devdata *);
-
-/*
- * dma_addr wrappers - an all-zero dma_addr is invalid for the hardware
- */
-dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
-			  size_t, int);
-dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
-const char *ipath_get_unit_name(int unit);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-#if defined(CONFIG_X86_64)
-#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
-#else
-#define ipath_flush_wc() wmb()
-#endif
-
-extern unsigned ipath_debug; /* debugging bit mask */
-extern unsigned ipath_linkrecovery;
-extern unsigned ipath_mtu4096;
-extern struct mutex ipath_mutex;
-
-#define IPATH_DRV_NAME		"ib_ipath"
-#define IPATH_MAJOR		233
-#define IPATH_USER_MINOR_BASE	0
-#define IPATH_DIAGPKT_MINOR	127
-#define IPATH_DIAG_MINOR_BASE	129
-#define IPATH_NMINORS		255
-
-#define ipath_dev_err(dd,fmt,...) \
-	do { \
-		const struct ipath_devdata *__dd = (dd); \
-		if (__dd->pcidev) \
-			dev_err(&__dd->pcidev->dev, "%s: " fmt, \
-				ipath_get_unit_name(__dd->ipath_unit), \
-				##__VA_ARGS__); \
-		else \
-			printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
-			       ipath_get_unit_name(__dd->ipath_unit), \
-			       ##__VA_ARGS__); \
-	} while (0)
-
-#if _IPATH_DEBUGGING
-
-# define __IPATH_DBG_WHICH(which,fmt,...) \
-	do { \
-		if (unlikely(ipath_debug & (which))) \
-			printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
-			       __func__,##__VA_ARGS__); \
-	} while(0)
-
-# define ipath_dbg(fmt,...) \
-	__IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
-# define ipath_cdbg(which,fmt,...) \
-	__IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
-
-#else /* ! _IPATH_DEBUGGING */
-
-# define ipath_dbg(fmt,...)
-# define ipath_cdbg(which,fmt,...)
-
-#endif /* _IPATH_DEBUGGING */
-
-/*
- * this is used for formatting hw error messages...
- */
-struct ipath_hwerror_msgs {
-	u64 mask;
-	const char *msg;
-};
-
-#define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
-
-/* in ipath_intr.c... */
-void ipath_format_hwerrors(u64 hwerrs,
-			   const struct ipath_hwerror_msgs *hwerrmsgs,
-			   size_t nhwerrmsgs,
-			   char *msg, size_t lmsg);
-
-#endif				/* _IPATH_KERNEL_H */
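The header above tracks the send-DMA descriptor ring with a pair of free-running counters (ipath_sdma_descq_added / ipath_sdma_descq_removed); ipath_sdma_descq_freecnt() derives the number of free slots from their difference, always leaving one slot empty so a full ring can be told apart from an empty one. Below is a minimal standalone sketch of that arithmetic with hypothetical values, not driver state:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as ipath_sdma_descq_freecnt(), outside the driver. */
static uint16_t sdma_freecnt(uint16_t descq_cnt, uint64_t added,
			     uint64_t removed, uint16_t reserved)
{
	/* in-use = added - removed; one slot is always kept empty */
	return descq_cnt - (uint16_t)(added - removed) - 1 - reserved;
}

int main(void)
{
	/* hypothetical 8-entry ring, 3 descriptors posted, none reserved */
	printf("free slots: %u\n", (unsigned)sdma_freecnt(8, 3, 0, 0)); /* 4 */
	return 0;
}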
diff --git a/drivers/staging/rdma/ipath/ipath_keys.c b/drivers/staging/rdma/ipath/ipath_keys.c
deleted file mode 100644
index c0e933f..0000000
--- a/drivers/staging/rdma/ipath/ipath_keys.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <asm/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_alloc_lkey - allocate an lkey
- * @rkt: lkey table in which to allocate the lkey
- * @mr: memory region that this lkey protects
- *
- * Returns 1 if successful, otherwise returns 0.
- */
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
-{
-	unsigned long flags;
-	u32 r;
-	u32 n;
-	int ret;
-
-	spin_lock_irqsave(&rkt->lock, flags);
-
-	/* Find the next available LKEY */
-	r = n = rkt->next;
-	for (;;) {
-		if (rkt->table[r] == NULL)
-			break;
-		r = (r + 1) & (rkt->max - 1);
-		if (r == n) {
-			spin_unlock_irqrestore(&rkt->lock, flags);
-			ipath_dbg("LKEY table full\n");
-			ret = 0;
-			goto bail;
-		}
-	}
-	rkt->next = (r + 1) & (rkt->max - 1);
-	/*
-	 * Make sure the lkey is never zero, since zero is reserved to
-	 * indicate an unrestricted LKEY.
-	 */
-	rkt->gen++;
-	mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
-		((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
-		 << 8);
-	if (mr->lkey == 0) {
-		mr->lkey |= 1 << 8;
-		rkt->gen++;
-	}
-	rkt->table[r] = mr;
-	spin_unlock_irqrestore(&rkt->lock, flags);
-
-	ret = 1;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_free_lkey - free an lkey
- * @rkt: table from which to free the lkey
- * @lkey: lkey id to free
- */
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
-{
-	unsigned long flags;
-	u32 r;
-
-	if (lkey == 0)
-		return;
-	r = lkey >> (32 - ib_ipath_lkey_table_size);
-	spin_lock_irqsave(&rkt->lock, flags);
-	rkt->table[r] = NULL;
-	spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * ipath_lkey_ok - check IB SGE for validity and initialize
- * @qp: queue pair owning the lkey table to check the SGE against
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
-		  struct ib_sge *sge, int acc)
-{
-	struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-	struct ipath_mregion *mr;
-	unsigned n, m;
-	size_t off;
-	int ret;
-
-	/*
-	 * We use LKEY == zero for kernel virtual addresses
-	 * (see ipath_get_dma_mr and ipath_dma.c).
-	 */
-	if (sge->lkey == 0) {
-		/* always a kernel port, no locking needed */
-		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
-		if (pd->user) {
-			ret = 0;
-			goto bail;
-		}
-		isge->mr = NULL;
-		isge->vaddr = (void *) sge->addr;
-		isge->length = sge->length;
-		isge->sge_length = sge->length;
-		ret = 1;
-		goto bail;
-	}
-	mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
-	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
-		     qp->ibqp.pd != mr->pd)) {
-		ret = 0;
-		goto bail;
-	}
-
-	off = sge->addr - mr->user_base;
-	if (unlikely(sge->addr < mr->user_base ||
-		     off + sge->length > mr->length ||
-		     (mr->access_flags & acc) != acc)) {
-		ret = 0;
-		goto bail;
-	}
-
-	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= IPATH_SEGSZ) {
-			m++;
-			n = 0;
-		}
-	}
-	isge->mr = mr;
-	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
-	isge->length = mr->map[m]->segs[n].length - off;
-	isge->sge_length = sge->length;
-	isge->m = m;
-	isge->n = n;
-
-	ret = 1;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: queue pair whose device's rkey table is checked
- * @ss: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- */
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
-		  u32 len, u64 vaddr, u32 rkey, int acc)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_lkey_table *rkt = &dev->lk_table;
-	struct ipath_sge *sge = &ss->sge;
-	struct ipath_mregion *mr;
-	unsigned n, m;
-	size_t off;
-	int ret;
-
-	/*
-	 * We use RKEY == zero for kernel virtual addresses
-	 * (see ipath_get_dma_mr and ipath_dma.c).
-	 */
-	if (rkey == 0) {
-		/* always a kernel port, no locking needed */
-		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
-		if (pd->user) {
-			ret = 0;
-			goto bail;
-		}
-		sge->mr = NULL;
-		sge->vaddr = (void *) vaddr;
-		sge->length = len;
-		sge->sge_length = len;
-		ss->sg_list = NULL;
-		ss->num_sge = 1;
-		ret = 1;
-		goto bail;
-	}
-
-	mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
-	if (unlikely(mr == NULL || mr->lkey != rkey ||
-		     qp->ibqp.pd != mr->pd)) {
-		ret = 0;
-		goto bail;
-	}
-
-	off = vaddr - mr->iova;
-	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
-		     (mr->access_flags & acc) == 0)) {
-		ret = 0;
-		goto bail;
-	}
-
-	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= IPATH_SEGSZ) {
-			m++;
-			n = 0;
-		}
-	}
-	sge->mr = mr;
-	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
-	sge->length = mr->map[m]->segs[n].length - off;
-	sge->sge_length = len;
-	sge->m = m;
-	sge->n = n;
-	ss->sg_list = NULL;
-	ss->num_sge = 1;
-
-	ret = 1;
-
-bail:
-	return ret;
-}
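ipath_alloc_lkey() above packs the table index into the top bits of the returned key and a generation counter just above the low byte, which is what lets ipath_free_lkey() recover the slot with a single shift and lets stale keys from a reused slot be rejected. A small self-contained sketch of that packing follows; the 12-bit table size is only an assumed value for illustration (the driver reads it from ib_ipath_lkey_table_size):

#include <stdint.h>
#include <stdio.h>

#define LKEY_TABLE_BITS 12	/* assumed table size, for illustration only */

static uint32_t make_lkey(uint32_t index, uint32_t gen)
{
	/* index in the top bits, masked generation just above the low byte */
	uint32_t lkey = (index << (32 - LKEY_TABLE_BITS)) |
		((((1u << (24 - LKEY_TABLE_BITS)) - 1) & gen) << 8);

	if (lkey == 0)		/* zero is reserved for the unrestricted LKEY */
		lkey |= 1u << 8;
	return lkey;
}

static uint32_t lkey_to_index(uint32_t lkey)
{
	return lkey >> (32 - LKEY_TABLE_BITS);
}

int main(void)
{
	uint32_t lkey = make_lkey(5, 3);

	/* prints "lkey=0x00500300 index=5" */
	printf("lkey=0x%08x index=%u\n", (unsigned)lkey,
	       (unsigned)lkey_to_index(lkey));
	return 0;
}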
diff --git a/drivers/staging/rdma/ipath/ipath_mad.c b/drivers/staging/rdma/ipath/ipath_mad.c
deleted file mode 100644
index ad3a926..0000000
--- a/drivers/staging/rdma/ipath/ipath_mad.c
+++ /dev/null
@@ -1,1521 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_pma.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
-#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
-#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
-
-static int reply(struct ib_smp *smp)
-{
-	/*
-	 * The verbs framework will handle the directed/LID route
-	 * packet changes.
-	 */
-	smp->method = IB_MGMT_METHOD_GET_RESP;
-	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-		smp->status |= IB_SMP_DIRECTION;
-	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static int recv_subn_get_nodedescription(struct ib_smp *smp,
-					 struct ib_device *ibdev)
-{
-	if (smp->attr_mod)
-		smp->status |= IB_SMP_INVALID_FIELD;
-
-	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
-
-	return reply(smp);
-}
-
-struct nodeinfo {
-	u8 base_version;
-	u8 class_version;
-	u8 node_type;
-	u8 num_ports;
-	__be64 sys_guid;
-	__be64 node_guid;
-	__be64 port_guid;
-	__be16 partition_cap;
-	__be16 device_id;
-	__be32 revision;
-	u8 local_port_num;
-	u8 vendor_id[3];
-} __attribute__ ((packed));
-
-static int recv_subn_get_nodeinfo(struct ib_smp *smp,
-				  struct ib_device *ibdev, u8 port)
-{
-	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
-	struct ipath_devdata *dd = to_idev(ibdev)->dd;
-	u32 vendor, majrev, minrev;
-
-	/* GUID 0 is illegal */
-	if (smp->attr_mod || (dd->ipath_guid == 0))
-		smp->status |= IB_SMP_INVALID_FIELD;
-
-	nip->base_version = 1;
-	nip->class_version = 1;
-	nip->node_type = 1;	/* channel adapter */
-	/*
-	 * XXX The num_ports value will need a layer function to get
-	 * the value if we ever have more than one IB port on a chip.
-	 * We will also need to get the GUID for the port.
-	 */
-	nip->num_ports = ibdev->phys_port_cnt;
-	/* This is already in network order */
-	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
-	nip->node_guid = dd->ipath_guid;
-	nip->port_guid = dd->ipath_guid;
-	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
-	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
-	majrev = dd->ipath_majrev;
-	minrev = dd->ipath_minrev;
-	nip->revision = cpu_to_be32((majrev << 16) | minrev);
-	nip->local_port_num = port;
-	vendor = dd->ipath_vendorid;
-	nip->vendor_id[0] = IPATH_SRC_OUI_1;
-	nip->vendor_id[1] = IPATH_SRC_OUI_2;
-	nip->vendor_id[2] = IPATH_SRC_OUI_3;
-
-	return reply(smp);
-}
-
-static int recv_subn_get_guidinfo(struct ib_smp *smp,
-				  struct ib_device *ibdev)
-{
-	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
-	__be64 *p = (__be64 *) smp->data;
-
-	/* 32 blocks of 8 64-bit GUIDs per block */
-
-	memset(smp->data, 0, sizeof(smp->data));
-
-	/*
-	 * We only support one GUID for now.  If this changes, the
-	 * portinfo.guid_cap field needs to be updated too.
-	 */
-	if (startgx == 0) {
-		__be64 g = to_idev(ibdev)->dd->ipath_guid;
-		if (g == 0)
-			/* GUID 0 is illegal */
-			smp->status |= IB_SMP_INVALID_FIELD;
-		else
-			/* The first is a copy of the read-only HW GUID. */
-			*p = g;
-	} else
-		smp->status |= IB_SMP_INVALID_FIELD;
-
-	return reply(smp);
-}
-
-static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
-{
-	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
-{
-	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
-}
-
-static int get_overrunthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-/**
- * set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-static int get_phyerrthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-/**
- * set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-/**
- * get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-static int get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
-	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-static int recv_subn_get_portinfo(struct ib_smp *smp,
-				  struct ib_device *ibdev, u8 port)
-{
-	struct ipath_ibdev *dev;
-	struct ipath_devdata *dd;
-	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-	u16 lid;
-	u8 ibcstat;
-	u8 mtu;
-	int ret;
-
-	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
-		smp->status |= IB_SMP_INVALID_FIELD;
-		ret = reply(smp);
-		goto bail;
-	}
-
-	dev = to_idev(ibdev);
-	dd = dev->dd;
-
-	/* Clear all fields.  Only set the non-zero fields. */
-	memset(smp->data, 0, sizeof(smp->data));
-
-	/* Only return the mkey if the protection field allows it. */
-	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
-	    dev->mkeyprot == 0)
-		pip->mkey = dev->mkey;
-	pip->gid_prefix = dev->gid_prefix;
-	lid = dd->ipath_lid;
-	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
-	pip->sm_lid = cpu_to_be16(dev->sm_lid);
-	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
-	/* pip->diag_code; */
-	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
-	pip->local_port_num = port;
-	pip->link_width_enabled = dd->ipath_link_width_enabled;
-	pip->link_width_supported = dd->ipath_link_width_supported;
-	pip->link_width_active = dd->ipath_link_width_active;
-	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
-	ibcstat = dd->ipath_lastibcstat;
-	/* map LinkState to IB portinfo values.  */
-	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
-
-	pip->portphysstate_linkdown =
-		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
-		(get_linkdowndefaultstate(dd) ? 1 : 2);
-	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
-	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
-		dd->ipath_link_speed_enabled;
-	switch (dd->ipath_ibmtu) {
-	case 4096:
-		mtu = IB_MTU_4096;
-		break;
-	case 2048:
-		mtu = IB_MTU_2048;
-		break;
-	case 1024:
-		mtu = IB_MTU_1024;
-		break;
-	case 512:
-		mtu = IB_MTU_512;
-		break;
-	case 256:
-		mtu = IB_MTU_256;
-		break;
-	default:		/* oops, something is wrong */
-		mtu = IB_MTU_2048;
-		break;
-	}
-	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
-	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
-	pip->vl_high_limit = dev->vl_high_limit;
-	/* pip->vl_arb_high_cap; // only one VL */
-	/* pip->vl_arb_low_cap; // only one VL */
-	/* InitTypeReply = 0 */
-	/* our mtu cap depends on whether 4K MTU enabled or not */
-	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
-	/* HCAs ignore VLStallCount and HOQLife */
-	/* pip->vlstallcnt_hoqlife; */
-	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
-	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
-	/* P_KeyViolations are counted by hardware. */
-	pip->pkey_violations =
-		cpu_to_be16((ipath_get_cr_errpkey(dd) -
-			     dev->z_pkey_violations) & 0xFFFF);
-	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
-	/* Only the hardware GUID is supported for now */
-	pip->guid_cap = 1;
-	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
-	/* 32.768 usec. response time (guessing) */
-	pip->resv_resptimevalue = 3;
-	pip->localphyerrors_overrunerrors =
-		(get_phyerrthreshold(dd) << 4) |
-		get_overrunthreshold(dd);
-	/* pip->max_credit_hint; */
-	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
-		u32 v;
-
-		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
-		pip->link_roundtrip_latency[0] = v >> 16;
-		pip->link_roundtrip_latency[1] = v >> 8;
-		pip->link_roundtrip_latency[2] = v;
-	}
-
-	ret = reply(smp);
-
-bail:
-	return ret;
-}
-
-/**
- * get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-	/* always a kernel port, no locking needed */
-	struct ipath_portdata *pd = dd->ipath_pd[0];
-
-	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
-	return 0;
-}
-
-static int recv_subn_get_pkeytable(struct ib_smp *smp,
-				   struct ib_device *ibdev)
-{
-	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
-	u16 *p = (u16 *) smp->data;
-	__be16 *q = (__be16 *) smp->data;
-
-	/* 64 blocks of 32 16-bit P_Key entries */
-
-	memset(smp->data, 0, sizeof(smp->data));
-	if (startpx == 0) {
-		struct ipath_ibdev *dev = to_idev(ibdev);
-		unsigned i, n = ipath_get_npkeys(dev->dd);
-
-		get_pkeys(dev->dd, p);
-
-		for (i = 0; i < n; i++)
-			q[i] = cpu_to_be16(p[i]);
-	} else
-		smp->status |= IB_SMP_INVALID_FIELD;
-
-	return reply(smp);
-}
-
-static int recv_subn_set_guidinfo(struct ib_smp *smp,
-				  struct ib_device *ibdev)
-{
-	/* The only GUID we support is the first read-only entry. */
-	return recv_subn_get_guidinfo(smp, ibdev);
-}
-
-/**
- * set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
-{
-	if (sleep)
-		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	else
-		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-			 dd->ipath_ibcctrl);
-	return 0;
-}
-
-/**
- * recv_subn_set_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- *
- * Set Portinfo (see ch. 14.2.5.6).
- */
-static int recv_subn_set_portinfo(struct ib_smp *smp,
-				  struct ib_device *ibdev, u8 port)
-{
-	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-	struct ib_event event;
-	struct ipath_ibdev *dev;
-	struct ipath_devdata *dd;
-	char clientrereg = 0;
-	u16 lid, smlid;
-	u8 lwe;
-	u8 lse;
-	u8 state;
-	u16 lstate;
-	u32 mtu;
-	int ret, ore;
-
-	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
-		goto err;
-
-	dev = to_idev(ibdev);
-	dd = dev->dd;
-	event.device = ibdev;
-	event.element.port_num = port;
-
-	dev->mkey = pip->mkey;
-	dev->gid_prefix = pip->gid_prefix;
-	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
-
-	lid = be16_to_cpu(pip->lid);
-	if (dd->ipath_lid != lid ||
-	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
-		/* Must be a valid unicast LID address. */
-		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
-			goto err;
-		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
-		event.event = IB_EVENT_LID_CHANGE;
-		ib_dispatch_event(&event);
-	}
-
-	smlid = be16_to_cpu(pip->sm_lid);
-	if (smlid != dev->sm_lid) {
-		/* Must be a valid unicast LID address. */
-		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
-			goto err;
-		dev->sm_lid = smlid;
-		event.event = IB_EVENT_SM_CHANGE;
-		ib_dispatch_event(&event);
-	}
-
-	/* Allow 1x or 4x to be set (see 14.2.6.6). */
-	lwe = pip->link_width_enabled;
-	if (lwe) {
-		if (lwe == 0xFF)
-			lwe = dd->ipath_link_width_supported;
-		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
-			goto err;
-		set_link_width_enabled(dd, lwe);
-	}
-
-	/* Allow 2.5 or 5.0 Gbs. */
-	lse = pip->linkspeedactive_enabled & 0xF;
-	if (lse) {
-		if (lse == 15)
-			lse = dd->ipath_link_speed_supported;
-		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
-			goto err;
-		set_link_speed_enabled(dd, lse);
-	}
-
-	/* Set link down default state. */
-	switch (pip->portphysstate_linkdown & 0xF) {
-	case 0: /* NOP */
-		break;
-	case 1: /* SLEEP */
-		if (set_linkdowndefaultstate(dd, 1))
-			goto err;
-		break;
-	case 2: /* POLL */
-		if (set_linkdowndefaultstate(dd, 0))
-			goto err;
-		break;
-	default:
-		goto err;
-	}
-
-	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
-	dev->vl_high_limit = pip->vl_high_limit;
-
-	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
-	case IB_MTU_256:
-		mtu = 256;
-		break;
-	case IB_MTU_512:
-		mtu = 512;
-		break;
-	case IB_MTU_1024:
-		mtu = 1024;
-		break;
-	case IB_MTU_2048:
-		mtu = 2048;
-		break;
-	case IB_MTU_4096:
-		if (!ipath_mtu4096)
-			goto err;
-		mtu = 4096;
-		break;
-	default:
-		/* XXX We have already partially updated our state! */
-		goto err;
-	}
-	ipath_set_mtu(dd, mtu);
-
-	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
-
-	/* We only support VL0 */
-	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
-		goto err;
-
-	if (pip->mkey_violations == 0)
-		dev->mkey_violations = 0;
-
-	/*
-	 * The hardware counter can't be reset, so take a snapshot and
-	 * subtract it later.
-	 */
-	if (pip->pkey_violations == 0)
-		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);
-
-	if (pip->qkey_violations == 0)
-		dev->qkey_violations = 0;
-
-	ore = pip->localphyerrors_overrunerrors;
-	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
-		goto err;
-
-	if (set_overrunthreshold(dd, (ore & 0xF)))
-		goto err;
-
-	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
-
-	if (pip->clientrereg_resv_subnetto & 0x80) {
-		clientrereg = 1;
-		event.event = IB_EVENT_CLIENT_REREGISTER;
-		ib_dispatch_event(&event);
-	}
-
-	/*
-	 * Do the port state change now that the other link parameters
-	 * have been set.
-	 * Changing the port physical state only makes sense if the link
-	 * is down or is being set to down.
-	 */
-	state = pip->linkspeed_portstate & 0xF;
-	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
-	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-		goto err;
-
-	/*
-	 * Only state changes of DOWN, ARM, and ACTIVE are valid
-	 * and must be in the correct state to take effect (see 7.2.6).
-	 */
-	switch (state) {
-	case IB_PORT_NOP:
-		if (lstate == 0)
-			break;
-		/* FALLTHROUGH */
-	case IB_PORT_DOWN:
-		if (lstate == 0)
-			lstate = IPATH_IB_LINKDOWN_ONLY;
-		else if (lstate == 1)
-			lstate = IPATH_IB_LINKDOWN_SLEEP;
-		else if (lstate == 2)
-			lstate = IPATH_IB_LINKDOWN;
-		else if (lstate == 3)
-			lstate = IPATH_IB_LINKDOWN_DISABLE;
-		else
-			goto err;
-		ipath_set_linkstate(dd, lstate);
-		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
-			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-			goto done;
-		}
-		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
-				IPATH_LINKACTIVE, 1000);
-		break;
-	case IB_PORT_ARMED:
-		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
-		break;
-	case IB_PORT_ACTIVE:
-		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-		break;
-	default:
-		/* XXX We have already partially updated our state! */
-		goto err;
-	}
-
-	ret = recv_subn_get_portinfo(smp, ibdev, port);
-
-	if (clientrereg)
-		pip->clientrereg_resv_subnetto |= 0x80;
-
-	goto done;
-
-err:
-	smp->status |= IB_SMP_INVALID_FIELD;
-	ret = recv_subn_get_portinfo(smp, ibdev, port);
-
-done:
-	return ret;
-}
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	int ret;
-
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (dd->ipath_pkeys[i] != key)
-			continue;
-		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
-			dd->ipath_pkeys[i] = 0;
-			ret = 1;
-			goto bail;
-		}
-		break;
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	u16 lkey = key & 0x7FFF;
-	int any = 0;
-	int ret;
-
-	if (lkey == 0x7FFF) {
-		ret = 0;
-		goto bail;
-	}
-
-	/* Look for an empty slot or a matching PKEY. */
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i]) {
-			any++;
-			continue;
-		}
-		/* If it matches exactly, try to increment the ref count */
-		if (dd->ipath_pkeys[i] == key) {
-			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
-				ret = 0;
-				goto bail;
-			}
-			/* Lost the race. Look for an empty slot below. */
-			atomic_dec(&dd->ipath_pkeyrefs[i]);
-			any++;
-		}
-		/*
-		 * It makes no sense to have both the limited and unlimited
-		 * PKEY set at the same time since the unlimited one will
-		 * disable the limited one.
-		 */
-		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-			ret = -EEXIST;
-			goto bail;
-		}
-	}
-	if (!any) {
-		ret = -EBUSY;
-		goto bail;
-	}
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i] &&
-		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-			/* for ipathstats, etc. */
-			ipath_stats.sps_pkeys[i] = lkey;
-			dd->ipath_pkeys[i] = key;
-			ret = 1;
-			goto bail;
-		}
-	}
-	ret = -EBUSY;
-
-bail:
-	return ret;
-}
-
-/**
- * set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
- */
-static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
-{
-	struct ipath_portdata *pd;
-	int i;
-	int changed = 0;
-
-	/* always a kernel port, no locking needed */
-	pd = dd->ipath_pd[0];
-
-	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-		u16 key = pkeys[i];
-		u16 okey = pd->port_pkeys[i];
-
-		if (key == okey)
-			continue;
-		/*
-		 * The value of this PKEY table entry is changing.
-		 * Remove the old entry in the hardware's array of PKEYs.
-		 */
-		if (okey & 0x7FFF)
-			changed |= rm_pkey(dd, okey);
-		if (key & 0x7FFF) {
-			int ret = add_pkey(dd, key);
-
-			if (ret < 0)
-				key = 0;
-			else
-				changed |= ret;
-		}
-		pd->port_pkeys[i] = key;
-	}
-	if (changed) {
-		u64 pkey;
-		struct ib_event event;
-
-		pkey = (u64) dd->ipath_pkeys[0] |
-			((u64) dd->ipath_pkeys[1] << 16) |
-			((u64) dd->ipath_pkeys[2] << 32) |
-			((u64) dd->ipath_pkeys[3] << 48);
-		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
-			   (unsigned long long) pkey);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-				 pkey);
-
-		event.event = IB_EVENT_PKEY_CHANGE;
-		event.device = &dd->verbs_dev->ibdev;
-		event.element.port_num = port;
-		ib_dispatch_event(&event);
-	}
-	return 0;
-}
-
-static int recv_subn_set_pkeytable(struct ib_smp *smp,
-				   struct ib_device *ibdev, u8 port)
-{
-	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
-	__be16 *p = (__be16 *) smp->data;
-	u16 *q = (u16 *) smp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	unsigned i, n = ipath_get_npkeys(dev->dd);
-
-	for (i = 0; i < n; i++)
-		q[i] = be16_to_cpu(p[i]);
-
-	if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
-		smp->status |= IB_SMP_INVALID_FIELD;
-
-	return recv_subn_get_pkeytable(smp, ibdev);
-}
-
-static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
-{
-	struct ib_class_port_info *p =
-		(struct ib_class_port_info *)pmp->data;
-
-	memset(pmp->data, 0, sizeof(pmp->data));
-
-	if (pmp->mad_hdr.attr_mod != 0)
-		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->capability_mask = cpu_to_be16(1 << 8);
-	p->base_version = 1;
-	p->class_version = 1;
-	/*
-	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
-	 * sec.
-	 */
-	p->resp_time_value = 18;
-
-	return reply((struct ib_smp *) pmp);
-}
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3-bit fields
- * which specify the N'th counter's capabilities.  See ch. 16.1.3.2.
- * We support five counters, which count only the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
-				    COUNTER_MASK(1, 1) | \
-				    COUNTER_MASK(1, 2) | \
-				    COUNTER_MASK(1, 3) | \
-				    COUNTER_MASK(1, 4))
-
-static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
-					   struct ib_device *ibdev, u8 port)
-{
-	struct ib_pma_portsamplescontrol *p =
-		(struct ib_pma_portsamplescontrol *)pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-	unsigned long flags;
-	u8 port_select = p->port_select;
-
-	memset(pmp->data, 0, sizeof(pmp->data));
-
-	p->port_select = port_select;
-	if (pmp->mad_hdr.attr_mod != 0 ||
-	    (port_select != port && port_select != 0xFF))
-		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-	/*
-	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
-	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
-	 * intervals are counted in ticks.  Since we use Linux timers, that
-	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
-	 * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2 for
-	 * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
-	 * have hardware support for delaying packets.
-	 */
-	if (crp->cr_psstat)
-		p->tick = dev->dd->ipath_link_speed_active - 1;
-	else
-		p->tick = 250;		/* 1 usec. */
-	p->counter_width = 4;	/* 32 bit counters */
-	p->counter_mask0_9 = COUNTER_MASK0_9;
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (crp->cr_psstat)
-		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-	else
-		p->sample_status = dev->pma_sample_status;
-	p->sample_start = cpu_to_be32(dev->pma_sample_start);
-	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
-	p->tag = cpu_to_be16(dev->pma_tag);
-	p->counter_select[0] = dev->pma_counter_select[0];
-	p->counter_select[1] = dev->pma_counter_select[1];
-	p->counter_select[2] = dev->pma_counter_select[2];
-	p->counter_select[3] = dev->pma_counter_select[3];
-	p->counter_select[4] = dev->pma_counter_select[4];
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-	return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
-					   struct ib_device *ibdev, u8 port)
-{
-	struct ib_pma_portsamplescontrol *p =
-		(struct ib_pma_portsamplescontrol *)pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-	unsigned long flags;
-	u8 status;
-	int ret;
-
-	if (pmp->mad_hdr.attr_mod != 0 ||
-	    (p->port_select != port && p->port_select != 0xFF)) {
-		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-		ret = reply((struct ib_smp *) pmp);
-		goto bail;
-	}
-
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (crp->cr_psstat)
-		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-	else
-		status = dev->pma_sample_status;
-	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
-		dev->pma_sample_start = be32_to_cpu(p->sample_start);
-		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
-		dev->pma_tag = be16_to_cpu(p->tag);
-		dev->pma_counter_select[0] = p->counter_select[0];
-		dev->pma_counter_select[1] = p->counter_select[1];
-		dev->pma_counter_select[2] = p->counter_select[2];
-		dev->pma_counter_select[3] = p->counter_select[3];
-		dev->pma_counter_select[4] = p->counter_select[4];
-		if (crp->cr_psstat) {
-			ipath_write_creg(dev->dd, crp->cr_psinterval,
-					 dev->pma_sample_interval);
-			ipath_write_creg(dev->dd, crp->cr_psstart,
-					 dev->pma_sample_start);
-		} else
-			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
-	}
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
-
-bail:
-	return ret;
-}
-
-static u64 get_counter(struct ipath_ibdev *dev,
-		       struct ipath_cregs const *crp,
-		       __be16 sel)
-{
-	u64 ret;
-
-	switch (sel) {
-	case IB_PMA_PORT_XMIT_DATA:
-		ret = (crp->cr_psxmitdatacount) ?
-			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
-			dev->ipath_sword;
-		break;
-	case IB_PMA_PORT_RCV_DATA:
-		ret = (crp->cr_psrcvdatacount) ?
-			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
-			dev->ipath_rword;
-		break;
-	case IB_PMA_PORT_XMIT_PKTS:
-		ret = (crp->cr_psxmitpktscount) ?
-			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
-			dev->ipath_spkts;
-		break;
-	case IB_PMA_PORT_RCV_PKTS:
-		ret = (crp->cr_psrcvpktscount) ?
-			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
-			dev->ipath_rpkts;
-		break;
-	case IB_PMA_PORT_XMIT_WAIT:
-		ret = (crp->cr_psxmitwaitcount) ?
-			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
-			dev->ipath_xmit_wait;
-		break;
-	default:
-		ret = 0;
-	}
-
-	return ret;
-}
-
-static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
-					  struct ib_device *ibdev)
-{
-	struct ib_pma_portsamplesresult *p =
-		(struct ib_pma_portsamplesresult *)pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-	u8 status;
-	int i;
-
-	memset(pmp->data, 0, sizeof(pmp->data));
-	p->tag = cpu_to_be16(dev->pma_tag);
-	if (crp->cr_psstat)
-		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-	else
-		status = dev->pma_sample_status;
-	p->sample_status = cpu_to_be16(status);
-	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-		    cpu_to_be32(
-			get_counter(dev, crp, dev->pma_counter_select[i]));
-
-	return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
-					      struct ib_device *ibdev)
-{
-	struct ib_pma_portsamplesresult_ext *p =
-		(struct ib_pma_portsamplesresult_ext *)pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-	u8 status;
-	int i;
-
-	memset(pmp->data, 0, sizeof(pmp->data));
-	p->tag = cpu_to_be16(dev->pma_tag);
-	if (crp->cr_psstat)
-		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-	else
-		status = dev->pma_sample_status;
-	p->sample_status = cpu_to_be16(status);
-	/* 64 bits */
-	p->extended_width = cpu_to_be32(0x80000000);
-	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-		    cpu_to_be64(
-			get_counter(dev, crp, dev->pma_counter_select[i]));
-
-	return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
-				     struct ib_device *ibdev, u8 port)
-{
-	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-		pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_verbs_counters cntrs;
-	u8 port_select = p->port_select;
-
-	ipath_get_counters(dev->dd, &cntrs);
-
-	/* Adjust counters for any resets done. */
-	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
-	cntrs.link_error_recovery_counter -=
-		dev->z_link_error_recovery_counter;
-	cntrs.link_downed_counter -= dev->z_link_downed_counter;
-	cntrs.port_rcv_errors += dev->rcv_errors;
-	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
-	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
-	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
-	cntrs.port_xmit_data -= dev->z_port_xmit_data;
-	cntrs.port_rcv_data -= dev->z_port_rcv_data;
-	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
-	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
-	cntrs.local_link_integrity_errors -=
-		dev->z_local_link_integrity_errors;
-	cntrs.excessive_buffer_overrun_errors -=
-		dev->z_excessive_buffer_overrun_errors;
-	cntrs.vl15_dropped -= dev->z_vl15_dropped;
-	cntrs.vl15_dropped += dev->n_vl15_dropped;
-
-	memset(pmp->data, 0, sizeof(pmp->data));
-
-	p->port_select = port_select;
-	if (pmp->mad_hdr.attr_mod != 0 ||
-	    (port_select != port && port_select != 0xFF))
-		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-	if (cntrs.symbol_error_counter > 0xFFFFUL)
-		p->symbol_error_counter = cpu_to_be16(0xFFFF);
-	else
-		p->symbol_error_counter =
-			cpu_to_be16((u16)cntrs.symbol_error_counter);
-	if (cntrs.link_error_recovery_counter > 0xFFUL)
-		p->link_error_recovery_counter = 0xFF;
-	else
-		p->link_error_recovery_counter =
-			(u8)cntrs.link_error_recovery_counter;
-	if (cntrs.link_downed_counter > 0xFFUL)
-		p->link_downed_counter = 0xFF;
-	else
-		p->link_downed_counter = (u8)cntrs.link_downed_counter;
-	if (cntrs.port_rcv_errors > 0xFFFFUL)
-		p->port_rcv_errors = cpu_to_be16(0xFFFF);
-	else
-		p->port_rcv_errors =
-			cpu_to_be16((u16) cntrs.port_rcv_errors);
-	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
-	else
-		p->port_rcv_remphys_errors =
-			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
-	if (cntrs.port_xmit_discards > 0xFFFFUL)
-		p->port_xmit_discards = cpu_to_be16(0xFFFF);
-	else
-		p->port_xmit_discards =
-			cpu_to_be16((u16)cntrs.port_xmit_discards);
-	if (cntrs.local_link_integrity_errors > 0xFUL)
-		cntrs.local_link_integrity_errors = 0xFUL;
-	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
-		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
-		cntrs.excessive_buffer_overrun_errors;
-	if (cntrs.vl15_dropped > 0xFFFFUL)
-		p->vl15_dropped = cpu_to_be16(0xFFFF);
-	else
-		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
-	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
-	else
-		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
-	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
-	else
-		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
-	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
-	else
-		p->port_xmit_packets =
-			cpu_to_be32((u32)cntrs.port_xmit_packets);
-	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
-	else
-		p->port_rcv_packets =
-			cpu_to_be32((u32) cntrs.port_rcv_packets);
-
-	return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
-					 struct ib_device *ibdev, u8 port)
-{
-	struct ib_pma_portcounters_ext *p =
-		(struct ib_pma_portcounters_ext *)pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	u64 swords, rwords, spkts, rpkts, xwait;
-	u8 port_select = p->port_select;
-
-	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-				&rpkts, &xwait);
-
-	/* Adjust counters for any resets done. */
-	swords -= dev->z_port_xmit_data;
-	rwords -= dev->z_port_rcv_data;
-	spkts -= dev->z_port_xmit_packets;
-	rpkts -= dev->z_port_rcv_packets;
-
-	memset(pmp->data, 0, sizeof(pmp->data));
-
-	p->port_select = port_select;
-	if (pmp->mad_hdr.attr_mod != 0 ||
-	    (port_select != port && port_select != 0xFF))
-		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-	p->port_xmit_data = cpu_to_be64(swords);
-	p->port_rcv_data = cpu_to_be64(rwords);
-	p->port_xmit_packets = cpu_to_be64(spkts);
-	p->port_rcv_packets = cpu_to_be64(rpkts);
-	p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
-	p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
-	p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
-	p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
-
-	return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
-				     struct ib_device *ibdev, u8 port)
-{
-	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-		pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_verbs_counters cntrs;
-
-	/*
-	 * Since the HW doesn't support clearing counters, we save the
-	 * current count and subtract it from future responses.
-	 */
-	ipath_get_counters(dev->dd, &cntrs);
-
-	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
-		dev->z_symbol_error_counter = cntrs.symbol_error_counter;
-
-	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
-		dev->z_link_error_recovery_counter =
-			cntrs.link_error_recovery_counter;
-
-	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
-		dev->z_link_downed_counter = cntrs.link_downed_counter;
-
-	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
-		dev->z_port_rcv_errors =
-			cntrs.port_rcv_errors + dev->rcv_errors;
-
-	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
-		dev->z_port_rcv_remphys_errors =
-			cntrs.port_rcv_remphys_errors;
-
-	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
-		dev->z_port_xmit_discards = cntrs.port_xmit_discards;
-
-	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
-		dev->z_local_link_integrity_errors =
-			cntrs.local_link_integrity_errors;
-
-	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
-		dev->z_excessive_buffer_overrun_errors =
-			cntrs.excessive_buffer_overrun_errors;
-
-	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
-		dev->n_vl15_dropped = 0;
-		dev->z_vl15_dropped = cntrs.vl15_dropped;
-	}
-
-	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
-		dev->z_port_xmit_data = cntrs.port_xmit_data;
-
-	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
-		dev->z_port_rcv_data = cntrs.port_rcv_data;
-
-	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
-		dev->z_port_xmit_packets = cntrs.port_xmit_packets;
-
-	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
-		dev->z_port_rcv_packets = cntrs.port_rcv_packets;
-
-	return recv_pma_get_portcounters(pmp, ibdev, port);
-}
-
-static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
-					 struct ib_device *ibdev, u8 port)
-{
-	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-		pmp->data;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	u64 swords, rwords, spkts, rpkts, xwait;
-
-	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-				&rpkts, &xwait);
-
-	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
-		dev->z_port_xmit_data = swords;
-
-	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
-		dev->z_port_rcv_data = rwords;
-
-	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
-		dev->z_port_xmit_packets = spkts;
-
-	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
-		dev->z_port_rcv_packets = rpkts;
-
-	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
-		dev->n_unicast_xmit = 0;
-
-	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
-		dev->n_unicast_rcv = 0;
-
-	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
-		dev->n_multicast_xmit = 0;
-
-	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
-		dev->n_multicast_rcv = 0;
-
-	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
-			u8 port_num, const struct ib_mad *in_mad,
-			struct ib_mad *out_mad)
-{
-	struct ib_smp *smp = (struct ib_smp *)out_mad;
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	int ret;
-
-	*out_mad = *in_mad;
-	if (smp->class_version != 1) {
-		smp->status |= IB_SMP_UNSUP_VERSION;
-		ret = reply(smp);
-		goto bail;
-	}
-
-	/* Is the mkey in the process of expiring? */
-	if (dev->mkey_lease_timeout &&
-	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
-		/* Clear timeout and mkey protection field. */
-		dev->mkey_lease_timeout = 0;
-		dev->mkeyprot = 0;
-	}
-
-	/*
-	 * M_Key checking depends on
-	 * Portinfo:M_Key_protect_bits
-	 */
-	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
-	    dev->mkey != smp->mkey &&
-	    (smp->method == IB_MGMT_METHOD_SET ||
-	     (smp->method == IB_MGMT_METHOD_GET &&
-	      dev->mkeyprot >= 2))) {
-		if (dev->mkey_violations != 0xFFFF)
-			++dev->mkey_violations;
-		if (dev->mkey_lease_timeout ||
-		    dev->mkey_lease_period == 0) {
-			ret = IB_MAD_RESULT_SUCCESS |
-				IB_MAD_RESULT_CONSUMED;
-			goto bail;
-		}
-		dev->mkey_lease_timeout = jiffies +
-			dev->mkey_lease_period * HZ;
-		/* Future: Generate a trap notice. */
-		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-		goto bail;
-	} else if (dev->mkey_lease_timeout)
-		dev->mkey_lease_timeout = 0;
-
-	switch (smp->method) {
-	case IB_MGMT_METHOD_GET:
-		switch (smp->attr_id) {
-		case IB_SMP_ATTR_NODE_DESC:
-			ret = recv_subn_get_nodedescription(smp, ibdev);
-			goto bail;
-		case IB_SMP_ATTR_NODE_INFO:
-			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
-			goto bail;
-		case IB_SMP_ATTR_GUID_INFO:
-			ret = recv_subn_get_guidinfo(smp, ibdev);
-			goto bail;
-		case IB_SMP_ATTR_PORT_INFO:
-			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
-			goto bail;
-		case IB_SMP_ATTR_PKEY_TABLE:
-			ret = recv_subn_get_pkeytable(smp, ibdev);
-			goto bail;
-		case IB_SMP_ATTR_SM_INFO:
-			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
-				ret = IB_MAD_RESULT_SUCCESS |
-					IB_MAD_RESULT_CONSUMED;
-				goto bail;
-			}
-			if (dev->port_cap_flags & IB_PORT_SM) {
-				ret = IB_MAD_RESULT_SUCCESS;
-				goto bail;
-			}
-			/* FALLTHROUGH */
-		default:
-			smp->status |= IB_SMP_UNSUP_METH_ATTR;
-			ret = reply(smp);
-			goto bail;
-		}
-
-	case IB_MGMT_METHOD_SET:
-		switch (smp->attr_id) {
-		case IB_SMP_ATTR_GUID_INFO:
-			ret = recv_subn_set_guidinfo(smp, ibdev);
-			goto bail;
-		case IB_SMP_ATTR_PORT_INFO:
-			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
-			goto bail;
-		case IB_SMP_ATTR_PKEY_TABLE:
-			ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
-			goto bail;
-		case IB_SMP_ATTR_SM_INFO:
-			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
-				ret = IB_MAD_RESULT_SUCCESS |
-					IB_MAD_RESULT_CONSUMED;
-				goto bail;
-			}
-			if (dev->port_cap_flags & IB_PORT_SM) {
-				ret = IB_MAD_RESULT_SUCCESS;
-				goto bail;
-			}
-			/* FALLTHROUGH */
-		default:
-			smp->status |= IB_SMP_UNSUP_METH_ATTR;
-			ret = reply(smp);
-			goto bail;
-		}
-
-	case IB_MGMT_METHOD_TRAP:
-	case IB_MGMT_METHOD_REPORT:
-	case IB_MGMT_METHOD_REPORT_RESP:
-	case IB_MGMT_METHOD_TRAP_REPRESS:
-	case IB_MGMT_METHOD_GET_RESP:
-		/*
-		 * The ib_mad module will call us to process responses
-		 * before checking for other consumers.
-		 * Just tell the caller to process it normally.
-		 */
-		ret = IB_MAD_RESULT_SUCCESS;
-		goto bail;
-	default:
-		smp->status |= IB_SMP_UNSUP_METHOD;
-		ret = reply(smp);
-	}
-
-bail:
-	return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port_num,
-			const struct ib_mad *in_mad,
-			struct ib_mad *out_mad)
-{
-	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
-	int ret;
-
-	*out_mad = *in_mad;
-	if (pmp->mad_hdr.class_version != 1) {
-		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
-		ret = reply((struct ib_smp *) pmp);
-		goto bail;
-	}
-
-	switch (pmp->mad_hdr.method) {
-	case IB_MGMT_METHOD_GET:
-		switch (pmp->mad_hdr.attr_id) {
-		case IB_PMA_CLASS_PORT_INFO:
-			ret = recv_pma_get_classportinfo(pmp);
-			goto bail;
-		case IB_PMA_PORT_SAMPLES_CONTROL:
-			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
-							      port_num);
-			goto bail;
-		case IB_PMA_PORT_SAMPLES_RESULT:
-			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
-			goto bail;
-		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
-			ret = recv_pma_get_portsamplesresult_ext(pmp,
-								 ibdev);
-			goto bail;
-		case IB_PMA_PORT_COUNTERS:
-			ret = recv_pma_get_portcounters(pmp, ibdev,
-							port_num);
-			goto bail;
-		case IB_PMA_PORT_COUNTERS_EXT:
-			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
-							    port_num);
-			goto bail;
-		default:
-			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
-			ret = reply((struct ib_smp *) pmp);
-			goto bail;
-		}
-
-	case IB_MGMT_METHOD_SET:
-		switch (pmp->mad_hdr.attr_id) {
-		case IB_PMA_PORT_SAMPLES_CONTROL:
-			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
-							      port_num);
-			goto bail;
-		case IB_PMA_PORT_COUNTERS:
-			ret = recv_pma_set_portcounters(pmp, ibdev,
-							port_num);
-			goto bail;
-		case IB_PMA_PORT_COUNTERS_EXT:
-			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
-							    port_num);
-			goto bail;
-		default:
-			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
-			ret = reply((struct ib_smp *) pmp);
-			goto bail;
-		}
-
-	case IB_MGMT_METHOD_GET_RESP:
-		/*
-		 * The ib_mad module will call us to process responses
-		 * before checking for other consumers.
-		 * Just tell the caller to process it normally.
-		 */
-		ret = IB_MAD_RESULT_SUCCESS;
-		goto bail;
-	default:
-		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
-		ret = reply((struct ib_smp *) pmp);
-	}
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port_num: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-		      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-		      const struct ib_mad_hdr *in, size_t in_mad_size,
-		      struct ib_mad_hdr *out, size_t *out_mad_size,
-		      u16 *out_mad_pkey_index)
-{
-	int ret;
-	const struct ib_mad *in_mad = (const struct ib_mad *)in;
-	struct ib_mad *out_mad = (struct ib_mad *)out;
-
-	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-			 *out_mad_size != sizeof(*out_mad)))
-		return IB_MAD_RESULT_FAILURE;
-
-	switch (in_mad->mad_hdr.mgmt_class) {
-	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
-	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
-		ret = process_subn(ibdev, mad_flags, port_num,
-				   in_mad, out_mad);
-		goto bail;
-	case IB_MGMT_CLASS_PERF_MGMT:
-		ret = process_perf(ibdev, port_num, in_mad, out_mad);
-		goto bail;
-	default:
-		ret = IB_MAD_RESULT_SUCCESS;
-	}
-
-bail:
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mmap.c b/drivers/staging/rdma/ipath/ipath_mmap.c
deleted file mode 100644
index e732742..0000000
--- a/drivers/staging/rdma/ipath/ipath_mmap.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <asm/pgtable.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct ipath_mmap_info
- */
-void ipath_release_mmap_info(struct kref *ref)
-{
-	struct ipath_mmap_info *ip =
-		container_of(ref, struct ipath_mmap_info, ref);
-	struct ipath_ibdev *dev = to_idev(ip->context->device);
-
-	spin_lock_irq(&dev->pending_lock);
-	list_del(&ip->pending_mmaps);
-	spin_unlock_irq(&dev->pending_lock);
-
-	vfree(ip->obj);
-	kfree(ip);
-}
-
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void ipath_vma_open(struct vm_area_struct *vma)
-{
-	struct ipath_mmap_info *ip = vma->vm_private_data;
-
-	kref_get(&ip->ref);
-}
-
-static void ipath_vma_close(struct vm_area_struct *vma)
-{
-	struct ipath_mmap_info *ip = vma->vm_private_data;
-
-	kref_put(&ip->ref, ipath_release_mmap_info);
-}
-
-static const struct vm_operations_struct ipath_vm_ops = {
-	.open =     ipath_vma_open,
-	.close =    ipath_vma_close,
-};
-
-/**
- * ipath_mmap - create a new mmap region
- * @context: the IB user context of the process making the mmap() call
- * @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
- */
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-	struct ipath_ibdev *dev = to_idev(context->device);
-	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-	unsigned long size = vma->vm_end - vma->vm_start;
-	struct ipath_mmap_info *ip, *pp;
-	int ret = -EINVAL;
-
-	/*
-	 * Search the device's list of objects waiting for a mmap call.
-	 * Normally, this list is very short since a call to create a
-	 * CQ, QP, or SRQ is soon followed by a call to mmap().
-	 */
-	spin_lock_irq(&dev->pending_lock);
-	list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
-				 pending_mmaps) {
-		/* Only the creator is allowed to mmap the object */
-		if (context != ip->context || (__u64) offset != ip->offset)
-			continue;
-		/* Don't allow a mmap larger than the object. */
-		if (size > ip->size)
-			break;
-
-		list_del_init(&ip->pending_mmaps);
-		spin_unlock_irq(&dev->pending_lock);
-
-		ret = remap_vmalloc_range(vma, ip->obj, 0);
-		if (ret)
-			goto done;
-		vma->vm_ops = &ipath_vm_ops;
-		vma->vm_private_data = ip;
-		ipath_vma_open(vma);
-		goto done;
-	}
-	spin_unlock_irq(&dev->pending_lock);
-done:
-	return ret;
-}
-
-/*
- * Allocate information for ipath_mmap
- */
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
-					       u32 size,
-					       struct ib_ucontext *context,
-					       void *obj) {
-	struct ipath_mmap_info *ip;
-
-	ip = kmalloc(sizeof *ip, GFP_KERNEL);
-	if (!ip)
-		goto bail;
-
-	size = PAGE_ALIGN(size);
-
-	spin_lock_irq(&dev->mmap_offset_lock);
-	if (dev->mmap_offset == 0)
-		dev->mmap_offset = PAGE_SIZE;
-	ip->offset = dev->mmap_offset;
-	dev->mmap_offset += size;
-	spin_unlock_irq(&dev->mmap_offset_lock);
-
-	INIT_LIST_HEAD(&ip->pending_mmaps);
-	ip->size = size;
-	ip->context = context;
-	ip->obj = obj;
-	kref_init(&ip->ref);
-
-bail:
-	return ip;
-}
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
-			    struct ipath_mmap_info *ip,
-			    u32 size, void *obj) {
-	size = PAGE_ALIGN(size);
-
-	spin_lock_irq(&dev->mmap_offset_lock);
-	if (dev->mmap_offset == 0)
-		dev->mmap_offset = PAGE_SIZE;
-	ip->offset = dev->mmap_offset;
-	dev->mmap_offset += size;
-	spin_unlock_irq(&dev->mmap_offset_lock);
-
-	ip->size = size;
-	ip->obj = obj;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mr.c b/drivers/staging/rdma/ipath/ipath_mr.c
deleted file mode 100644
index b76b0ce..0000000
--- a/drivers/staging/rdma/ipath/ipath_mr.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-
-/* Fast memory region */
-struct ipath_fmr {
-	struct ib_fmr ibfmr;
-	u8 page_shift;
-	struct ipath_mregion mr;        /* must be last */
-};
-
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
-	return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
-/**
- * ipath_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see ipath_dma.c).
- */
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
-{
-	struct ipath_mr *mr;
-	struct ib_mr *ret;
-
-	mr = kzalloc(sizeof *mr, GFP_KERNEL);
-	if (!mr) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	mr->mr.access_flags = acc;
-	ret = &mr->ibmr;
-
-bail:
-	return ret;
-}
-
-static struct ipath_mr *alloc_mr(int count,
-				 struct ipath_lkey_table *lk_table)
-{
-	struct ipath_mr *mr;
-	int m, i = 0;
-
-	/* Allocate struct plus pointers to first level page tables. */
-	m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
-	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
-	if (!mr)
-		goto done;
-
-	/* Allocate first level page tables. */
-	for (; i < m; i++) {
-		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
-		if (!mr->mr.map[i])
-			goto bail;
-	}
-	mr->mr.mapsz = m;
-
-	if (!ipath_alloc_lkey(lk_table, &mr->mr))
-		goto bail;
-	mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
-
-	goto done;
-
-bail:
-	while (i) {
-		i--;
-		kfree(mr->mr.map[i]);
-	}
-	kfree(mr);
-	mr = NULL;
-
-done:
-	return mr;
-}
-
-/**
- * ipath_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @virt_addr: virtual address to use (from HCA's point of view)
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the InfiniPath driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-				u64 virt_addr, int mr_access_flags,
-				struct ib_udata *udata)
-{
-	struct ipath_mr *mr;
-	struct ib_umem *umem;
-	int n, m, entry;
-	struct scatterlist *sg;
-	struct ib_mr *ret;
-
-	if (length == 0) {
-		ret = ERR_PTR(-EINVAL);
-		goto bail;
-	}
-
-	umem = ib_umem_get(pd->uobject->context, start, length,
-			   mr_access_flags, 0);
-	if (IS_ERR(umem))
-		return (void *) umem;
-
-	n = umem->nmap;
-	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
-	if (!mr) {
-		ret = ERR_PTR(-ENOMEM);
-		ib_umem_release(umem);
-		goto bail;
-	}
-
-	mr->mr.pd = pd;
-	mr->mr.user_base = start;
-	mr->mr.iova = virt_addr;
-	mr->mr.length = length;
-	mr->mr.offset = ib_umem_offset(umem);
-	mr->mr.access_flags = mr_access_flags;
-	mr->mr.max_segs = n;
-	mr->umem = umem;
-
-	m = 0;
-	n = 0;
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		void *vaddr;
-
-		vaddr = page_address(sg_page(sg));
-		if (!vaddr) {
-			ret = ERR_PTR(-EINVAL);
-			goto bail;
-		}
-		mr->mr.map[m]->segs[n].vaddr = vaddr;
-		mr->mr.map[m]->segs[n].length = umem->page_size;
-		n++;
-		if (n == IPATH_SEGSZ) {
-			m++;
-			n = 0;
-		}
-	}
-	ret = &mr->ibmr;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by ipath_get_dma_mr()
- * or ipath_reg_user_mr().
- */
-int ipath_dereg_mr(struct ib_mr *ibmr)
-{
-	struct ipath_mr *mr = to_imr(ibmr);
-	int i;
-
-	ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
-	i = mr->mr.mapsz;
-	while (i) {
-		i--;
-		kfree(mr->mr.map[i]);
-	}
-
-	if (mr->umem)
-		ib_umem_release(mr->umem);
-
-	kfree(mr);
-	return 0;
-}
-
-/**
- * ipath_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-			       struct ib_fmr_attr *fmr_attr)
-{
-	struct ipath_fmr *fmr;
-	int m, i = 0;
-	struct ib_fmr *ret;
-
-	/* Allocate struct plus pointers to first level page tables. */
-	m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
-	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
-	if (!fmr)
-		goto bail;
-
-	/* Allocate first level page tables. */
-	for (; i < m; i++) {
-		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
-					 GFP_KERNEL);
-		if (!fmr->mr.map[i])
-			goto bail;
-	}
-	fmr->mr.mapsz = m;
-
-	/*
-	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
-	 * rkey.
-	 */
-	if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
-		goto bail;
-	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
-	/*
-	 * Resources are allocated but no valid mapping (RKEY can't be
-	 * used).
-	 */
-	fmr->mr.pd = pd;
-	fmr->mr.user_base = 0;
-	fmr->mr.iova = 0;
-	fmr->mr.length = 0;
-	fmr->mr.offset = 0;
-	fmr->mr.access_flags = mr_access_flags;
-	fmr->mr.max_segs = fmr_attr->max_pages;
-	fmr->page_shift = fmr_attr->page_shift;
-
-	ret = &fmr->ibfmr;
-	goto done;
-
-bail:
-	while (i)
-		kfree(fmr->mr.map[--i]);
-	kfree(fmr);
-	ret = ERR_PTR(-ENOMEM);
-
-done:
-	return ret;
-}
-
-/**
- * ipath_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
-		       int list_len, u64 iova)
-{
-	struct ipath_fmr *fmr = to_ifmr(ibfmr);
-	struct ipath_lkey_table *rkt;
-	unsigned long flags;
-	int m, n, i;
-	u32 ps;
-	int ret;
-
-	if (list_len > fmr->mr.max_segs) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	rkt = &to_idev(ibfmr->device)->lk_table;
-	spin_lock_irqsave(&rkt->lock, flags);
-	fmr->mr.user_base = iova;
-	fmr->mr.iova = iova;
-	ps = 1 << fmr->page_shift;
-	fmr->mr.length = list_len * ps;
-	m = 0;
-	n = 0;
-	ps = 1 << fmr->page_shift;
-	for (i = 0; i < list_len; i++) {
-		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
-		fmr->mr.map[m]->segs[n].length = ps;
-		if (++n == IPATH_SEGSZ) {
-			m++;
-			n = 0;
-		}
-	}
-	spin_unlock_irqrestore(&rkt->lock, flags);
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int ipath_unmap_fmr(struct list_head *fmr_list)
-{
-	struct ipath_fmr *fmr;
-	struct ipath_lkey_table *rkt;
-	unsigned long flags;
-
-	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
-		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
-		spin_lock_irqsave(&rkt->lock, flags);
-		fmr->mr.user_base = 0;
-		fmr->mr.iova = 0;
-		fmr->mr.length = 0;
-		spin_unlock_irqrestore(&rkt->lock, flags);
-	}
-	return 0;
-}
-
-/**
- * ipath_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
-{
-	struct ipath_fmr *fmr = to_ifmr(ibfmr);
-	int i;
-
-	ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
-	i = fmr->mr.mapsz;
-	while (i)
-		kfree(fmr->mr.map[--i]);
-	kfree(fmr);
-	return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_qp.c b/drivers/staging/rdma/ipath/ipath_qp.c
deleted file mode 100644
index 280cd2d..0000000
--- a/drivers/staging/rdma/ipath/ipath_qp.c
+++ /dev/null
@@ -1,1079 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
-#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
-				 (off))
-#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
-						      BITS_PER_PAGE, off)
-
-/*
- * Convert the AETH credit code into the number of credits.
- */
-static u32 credit_table[31] = {
-	0,			/* 0 */
-	1,			/* 1 */
-	2,			/* 2 */
-	3,			/* 3 */
-	4,			/* 4 */
-	6,			/* 5 */
-	8,			/* 6 */
-	12,			/* 7 */
-	16,			/* 8 */
-	24,			/* 9 */
-	32,			/* A */
-	48,			/* B */
-	64,			/* C */
-	96,			/* D */
-	128,			/* E */
-	192,			/* F */
-	256,			/* 10 */
-	384,			/* 11 */
-	512,			/* 12 */
-	768,			/* 13 */
-	1024,			/* 14 */
-	1536,			/* 15 */
-	2048,			/* 16 */
-	3072,			/* 17 */
-	4096,			/* 18 */
-	6144,			/* 19 */
-	8192,			/* 1A */
-	12288,			/* 1B */
-	16384,			/* 1C */
-	24576,			/* 1D */
-	32768			/* 1E */
-};
-
-
-static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
-{
-	unsigned long page = get_zeroed_page(GFP_KERNEL);
-	unsigned long flags;
-
-	/*
-	 * Free the page if someone raced with us installing it.
-	 */
-
-	spin_lock_irqsave(&qpt->lock, flags);
-	if (map->page)
-		free_page(page);
-	else
-		map->page = (void *)page;
-	spin_unlock_irqrestore(&qpt->lock, flags);
-}
-
-
-static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
-{
-	u32 i, offset, max_scan, qpn;
-	struct qpn_map *map;
-	u32 ret = -1;
-
-	if (type == IB_QPT_SMI)
-		ret = 0;
-	else if (type == IB_QPT_GSI)
-		ret = 1;
-
-	if (ret != -1) {
-		map = &qpt->map[0];
-		if (unlikely(!map->page)) {
-			get_map_page(qpt, map);
-			if (unlikely(!map->page)) {
-				ret = -ENOMEM;
-				goto bail;
-			}
-		}
-		if (!test_and_set_bit(ret, map->page))
-			atomic_dec(&map->n_free);
-		else
-			ret = -EBUSY;
-		goto bail;
-	}
-
-	qpn = qpt->last + 1;
-	if (qpn >= QPN_MAX)
-		qpn = 2;
-	offset = qpn & BITS_PER_PAGE_MASK;
-	map = &qpt->map[qpn / BITS_PER_PAGE];
-	max_scan = qpt->nmaps - !offset;
-	for (i = 0;;) {
-		if (unlikely(!map->page)) {
-			get_map_page(qpt, map);
-			if (unlikely(!map->page))
-				break;
-		}
-		if (likely(atomic_read(&map->n_free))) {
-			do {
-				if (!test_and_set_bit(offset, map->page)) {
-					atomic_dec(&map->n_free);
-					qpt->last = qpn;
-					ret = qpn;
-					goto bail;
-				}
-				offset = find_next_offset(map, offset);
-				qpn = mk_qpn(qpt, map, offset);
-				/*
-				 * This test differs from alloc_pidmap().
-				 * If find_next_offset() does find a zero
-				 * bit, we don't need to check for QPN
-				 * wrapping around past our starting QPN.
-				 * We just need to be sure we don't loop
-				 * forever.
-				 */
-			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
-		}
-		/*
-		 * In order to keep the number of pages allocated to a
-		 * minimum, we scan all the existing pages before increasing
-		 * the size of the bitmap table.
-		 */
-		if (++i > max_scan) {
-			if (qpt->nmaps == QPNMAP_ENTRIES)
-				break;
-			map = &qpt->map[qpt->nmaps++];
-			offset = 0;
-		} else if (map < &qpt->map[qpt->nmaps]) {
-			++map;
-			offset = 0;
-		} else {
-			map = &qpt->map[0];
-			offset = 2;
-		}
-		qpn = mk_qpn(qpt, map, offset);
-	}
-
-	ret = -ENOMEM;
-
-bail:
-	return ret;
-}
-
-static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
-	struct qpn_map *map;
-
-	map = qpt->map + qpn / BITS_PER_PAGE;
-	if (map->page)
-		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
-	atomic_inc(&map->n_free);
-}
-
-/**
- * ipath_alloc_qpn - allocate a QP number
- * @qpt: the QP table
- * @qp: the QP
- * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
- *
- * Allocate the next available QPN and put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
-			   enum ib_qp_type type)
-{
-	unsigned long flags;
-	int ret;
-
-	ret = alloc_qpn(qpt, type);
-	if (ret < 0)
-		goto bail;
-	qp->ibqp.qp_num = ret;
-
-	/* Add the QP to the hash table. */
-	spin_lock_irqsave(&qpt->lock, flags);
-
-	ret %= qpt->max;
-	qp->next = qpt->table[ret];
-	qpt->table[ret] = qp;
-	atomic_inc(&qp->refcount);
-
-	spin_unlock_irqrestore(&qpt->lock, flags);
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_free_qp - remove a QP from the QP table
- * @qpt: the QP table
- * @qp: the QP to remove
- *
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
-{
-	struct ipath_qp *q, **qpp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&qpt->lock, flags);
-
-	/* Remove QP from the hash table. */
-	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
-	for (; (q = *qpp) != NULL; qpp = &q->next) {
-		if (q == qp) {
-			*qpp = qp->next;
-			qp->next = NULL;
-			atomic_dec(&qp->refcount);
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(&qpt->lock, flags);
-}
-
-/**
- * ipath_free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
- *
- * There should not be any QPs still in use.
- * Free memory for table.
- */
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
-{
-	unsigned long flags;
-	struct ipath_qp *qp;
-	u32 n, qp_inuse = 0;
-
-	spin_lock_irqsave(&qpt->lock, flags);
-	for (n = 0; n < qpt->max; n++) {
-		qp = qpt->table[n];
-		qpt->table[n] = NULL;
-
-		for (; qp; qp = qp->next)
-			qp_inuse++;
-	}
-	spin_unlock_irqrestore(&qpt->lock, flags);
-
-	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
-		if (qpt->map[n].page)
-			free_page((unsigned long) qpt->map[n].page);
-	return qp_inuse;
-}
-
-/**
- * ipath_lookup_qpn - return the QP with the given QPN
- * @qpt: the QP table
- * @qpn: the QP number to look up
- *
- * The caller is responsible for decrementing the QP reference count
- * when done.
- */
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
-	unsigned long flags;
-	struct ipath_qp *qp;
-
-	spin_lock_irqsave(&qpt->lock, flags);
-
-	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
-		if (qp->ibqp.qp_num == qpn) {
-			atomic_inc(&qp->refcount);
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(&qpt->lock, flags);
-	return qp;
-}
-
-/**
- * ipath_reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
-{
-	qp->remote_qpn = 0;
-	qp->qkey = 0;
-	qp->qp_access_flags = 0;
-	atomic_set(&qp->s_dma_busy, 0);
-	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
-	qp->s_hdrwords = 0;
-	qp->s_wqe = NULL;
-	qp->s_pkt_delay = 0;
-	qp->s_draining = 0;
-	qp->s_psn = 0;
-	qp->r_psn = 0;
-	qp->r_msn = 0;
-	if (type == IB_QPT_RC) {
-		qp->s_state = IB_OPCODE_RC_SEND_LAST;
-		qp->r_state = IB_OPCODE_RC_SEND_LAST;
-	} else {
-		qp->s_state = IB_OPCODE_UC_SEND_LAST;
-		qp->r_state = IB_OPCODE_UC_SEND_LAST;
-	}
-	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-	qp->r_nak_state = 0;
-	qp->r_aflags = 0;
-	qp->r_flags = 0;
-	qp->s_rnr_timeout = 0;
-	qp->s_head = 0;
-	qp->s_tail = 0;
-	qp->s_cur = 0;
-	qp->s_last = 0;
-	qp->s_ssn = 1;
-	qp->s_lsn = 0;
-	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
-	qp->r_head_ack_queue = 0;
-	qp->s_tail_ack_queue = 0;
-	qp->s_num_rd_atomic = 0;
-	if (qp->r_rq.wq) {
-		qp->r_rq.wq->head = 0;
-		qp->r_rq.wq->tail = 0;
-	}
-}
-
-/**
- * ipath_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ib_wc wc;
-	int ret = 0;
-
-	if (qp->state == IB_QPS_ERR)
-		goto bail;
-
-	qp->state = IB_QPS_ERR;
-
-	spin_lock(&dev->pending_lock);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	/* Schedule the sending tasklet to drain the send work queue. */
-	if (qp->s_last != qp->s_head)
-		ipath_schedule_send(qp);
-
-	memset(&wc, 0, sizeof(wc));
-	wc.qp = &qp->ibqp;
-	wc.opcode = IB_WC_RECV;
-
-	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
-		wc.wr_id = qp->r_wr_id;
-		wc.status = err;
-		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-	}
-	wc.status = IB_WC_WR_FLUSH_ERR;
-
-	if (qp->r_rq.wq) {
-		struct ipath_rwq *wq;
-		u32 head;
-		u32 tail;
-
-		spin_lock(&qp->r_rq.lock);
-
-		/* sanity check pointers before trusting them */
-		wq = qp->r_rq.wq;
-		head = wq->head;
-		if (head >= qp->r_rq.size)
-			head = 0;
-		tail = wq->tail;
-		if (tail >= qp->r_rq.size)
-			tail = 0;
-		while (tail != head) {
-			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
-			if (++tail >= qp->r_rq.size)
-				tail = 0;
-			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-		}
-		wq->tail = tail;
-
-		spin_unlock(&qp->r_rq.lock);
-	} else if (qp->ibqp.event_handler)
-		ret = 1;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair whose attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for ipathverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		    int attr_mask, struct ib_udata *udata)
-{
-	struct ipath_ibdev *dev = to_idev(ibqp->device);
-	struct ipath_qp *qp = to_iqp(ibqp);
-	enum ib_qp_state cur_state, new_state;
-	int lastwqe = 0;
-	int ret;
-
-	spin_lock_irq(&qp->s_lock);
-
-	cur_state = attr_mask & IB_QP_CUR_STATE ?
-		attr->cur_qp_state : qp->state;
-	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
-		goto inval;
-
-	if (attr_mask & IB_QP_AV) {
-		if (attr->ah_attr.dlid == 0 ||
-		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
-			goto inval;
-
-		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
-		    (attr->ah_attr.grh.sgid_index > 1))
-			goto inval;
-	}
-
-	if (attr_mask & IB_QP_PKEY_INDEX)
-		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
-			goto inval;
-
-	if (attr_mask & IB_QP_MIN_RNR_TIMER)
-		if (attr->min_rnr_timer > 31)
-			goto inval;
-
-	if (attr_mask & IB_QP_PORT)
-		if (attr->port_num == 0 ||
-		    attr->port_num > ibqp->device->phys_port_cnt)
-			goto inval;
-
-	/*
-	 * don't allow invalid Path MTU values, or values greater than
-	 * 2048 unless we are configured for a 4KB MTU
-	 */
-	if ((attr_mask & IB_QP_PATH_MTU) &&
-		(ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
-		(attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
-		goto inval;
-
-	if (attr_mask & IB_QP_PATH_MIG_STATE)
-		if (attr->path_mig_state != IB_MIG_MIGRATED &&
-		    attr->path_mig_state != IB_MIG_REARM)
-			goto inval;
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
-			goto inval;
-
-	switch (new_state) {
-	case IB_QPS_RESET:
-		if (qp->state != IB_QPS_RESET) {
-			qp->state = IB_QPS_RESET;
-			spin_lock(&dev->pending_lock);
-			if (!list_empty(&qp->timerwait))
-				list_del_init(&qp->timerwait);
-			if (!list_empty(&qp->piowait))
-				list_del_init(&qp->piowait);
-			spin_unlock(&dev->pending_lock);
-			qp->s_flags &= ~IPATH_S_ANY_WAIT;
-			spin_unlock_irq(&qp->s_lock);
-			/* Stop the sending tasklet */
-			tasklet_kill(&qp->s_task);
-			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-			spin_lock_irq(&qp->s_lock);
-		}
-		ipath_reset_qp(qp, ibqp->qp_type);
-		break;
-
-	case IB_QPS_SQD:
-		qp->s_draining = qp->s_last != qp->s_cur;
-		qp->state = new_state;
-		break;
-
-	case IB_QPS_SQE:
-		if (qp->ibqp.qp_type == IB_QPT_RC)
-			goto inval;
-		qp->state = new_state;
-		break;
-
-	case IB_QPS_ERR:
-		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-		break;
-
-	default:
-		qp->state = new_state;
-		break;
-	}
-
-	if (attr_mask & IB_QP_PKEY_INDEX)
-		qp->s_pkey_index = attr->pkey_index;
-
-	if (attr_mask & IB_QP_DEST_QPN)
-		qp->remote_qpn = attr->dest_qp_num;
-
-	if (attr_mask & IB_QP_SQ_PSN) {
-		qp->s_psn = qp->s_next_psn = attr->sq_psn;
-		qp->s_last_psn = qp->s_next_psn - 1;
-	}
-
-	if (attr_mask & IB_QP_RQ_PSN)
-		qp->r_psn = attr->rq_psn;
-
-	if (attr_mask & IB_QP_ACCESS_FLAGS)
-		qp->qp_access_flags = attr->qp_access_flags;
-
-	if (attr_mask & IB_QP_AV) {
-		qp->remote_ah_attr = attr->ah_attr;
-		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
-	}
-
-	if (attr_mask & IB_QP_PATH_MTU)
-		qp->path_mtu = attr->path_mtu;
-
-	if (attr_mask & IB_QP_RETRY_CNT)
-		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
-
-	if (attr_mask & IB_QP_RNR_RETRY) {
-		qp->s_rnr_retry = attr->rnr_retry;
-		if (qp->s_rnr_retry > 7)
-			qp->s_rnr_retry = 7;
-		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
-	}
-
-	if (attr_mask & IB_QP_MIN_RNR_TIMER)
-		qp->r_min_rnr_timer = attr->min_rnr_timer;
-
-	if (attr_mask & IB_QP_TIMEOUT)
-		qp->timeout = attr->timeout;
-
-	if (attr_mask & IB_QP_QKEY)
-		qp->qkey = attr->qkey;
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
-		qp->s_max_rd_atomic = attr->max_rd_atomic;
-
-	spin_unlock_irq(&qp->s_lock);
-
-	if (lastwqe) {
-		struct ib_event ev;
-
-		ev.device = qp->ibqp.device;
-		ev.element.qp = &qp->ibqp;
-		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-	}
-	ret = 0;
-	goto bail;
-
-inval:
-	spin_unlock_irq(&qp->s_lock);
-	ret = -EINVAL;
-
-bail:
-	return ret;
-}
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		   int attr_mask, struct ib_qp_init_attr *init_attr)
-{
-	struct ipath_qp *qp = to_iqp(ibqp);
-
-	attr->qp_state = qp->state;
-	attr->cur_qp_state = attr->qp_state;
-	attr->path_mtu = qp->path_mtu;
-	attr->path_mig_state = 0;
-	attr->qkey = qp->qkey;
-	attr->rq_psn = qp->r_psn;
-	attr->sq_psn = qp->s_next_psn;
-	attr->dest_qp_num = qp->remote_qpn;
-	attr->qp_access_flags = qp->qp_access_flags;
-	attr->cap.max_send_wr = qp->s_size - 1;
-	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
-	attr->cap.max_send_sge = qp->s_max_sge;
-	attr->cap.max_recv_sge = qp->r_rq.max_sge;
-	attr->cap.max_inline_data = 0;
-	attr->ah_attr = qp->remote_ah_attr;
-	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
-	attr->pkey_index = qp->s_pkey_index;
-	attr->alt_pkey_index = 0;
-	attr->en_sqd_async_notify = 0;
-	attr->sq_draining = qp->s_draining;
-	attr->max_rd_atomic = qp->s_max_rd_atomic;
-	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
-	attr->min_rnr_timer = qp->r_min_rnr_timer;
-	attr->port_num = 1;
-	attr->timeout = qp->timeout;
-	attr->retry_cnt = qp->s_retry_cnt;
-	attr->rnr_retry = qp->s_rnr_retry_cnt;
-	attr->alt_port_num = 0;
-	attr->alt_timeout = 0;
-
-	init_attr->event_handler = qp->ibqp.event_handler;
-	init_attr->qp_context = qp->ibqp.qp_context;
-	init_attr->send_cq = qp->ibqp.send_cq;
-	init_attr->recv_cq = qp->ibqp.recv_cq;
-	init_attr->srq = qp->ibqp.srq;
-	init_attr->cap = attr->cap;
-	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
-		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
-	else
-		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
-	init_attr->qp_type = qp->ibqp.qp_type;
-	init_attr->port_num = 1;
-	return 0;
-}
-
-/**
- * ipath_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 ipath_compute_aeth(struct ipath_qp *qp)
-{
-	u32 aeth = qp->r_msn & IPATH_MSN_MASK;
-
-	if (qp->ibqp.srq) {
-		/*
-		 * Shared receive queues don't generate credits.
-		 * Set the credit field to the invalid value.
-		 */
-		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
-	} else {
-		u32 min, max, x;
-		u32 credits;
-		struct ipath_rwq *wq = qp->r_rq.wq;
-		u32 head;
-		u32 tail;
-
-		/* sanity check pointers before trusting them */
-		head = wq->head;
-		if (head >= qp->r_rq.size)
-			head = 0;
-		tail = wq->tail;
-		if (tail >= qp->r_rq.size)
-			tail = 0;
-		/*
-		 * Compute the number of credits available (RWQEs).
-		 * XXX Not holding the r_rq.lock here so there is a small
-		 * chance that the pair of reads are not atomic.
-		 */
-		credits = head - tail;
-		if ((int)credits < 0)
-			credits += qp->r_rq.size;
-		/*
-		 * Binary search the credit table to find the code to
-		 * use.
-		 */
-		min = 0;
-		max = 31;
-		for (;;) {
-			x = (min + max) / 2;
-			if (credit_table[x] == credits)
-				break;
-			if (credit_table[x] > credits)
-				max = x;
-			else if (min == x)
-				break;
-			else
-				min = x;
-		}
-		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
-	}
-	return cpu_to_be32(aeth);
-}
-
-/**
- * ipath_create_qp - create a queue pair for a device
- * @ibpd: the protection domain whose device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: unused by InfiniPath
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
-			      struct ib_qp_init_attr *init_attr,
-			      struct ib_udata *udata)
-{
-	struct ipath_qp *qp;
-	int err;
-	struct ipath_swqe *swq = NULL;
-	struct ipath_ibdev *dev;
-	size_t sz;
-	size_t sg_list_sz;
-	struct ib_qp *ret;
-
-	if (init_attr->create_flags) {
-		ret = ERR_PTR(-EINVAL);
-		goto bail;
-	}
-
-	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
-	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
-		ret = ERR_PTR(-EINVAL);
-		goto bail;
-	}
-
-	/* Check receive queue parameters if no SRQ is specified. */
-	if (!init_attr->srq) {
-		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
-		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
-			ret = ERR_PTR(-EINVAL);
-			goto bail;
-		}
-		if (init_attr->cap.max_send_sge +
-		    init_attr->cap.max_send_wr +
-		    init_attr->cap.max_recv_sge +
-		    init_attr->cap.max_recv_wr == 0) {
-			ret = ERR_PTR(-EINVAL);
-			goto bail;
-		}
-	}
-
-	switch (init_attr->qp_type) {
-	case IB_QPT_UC:
-	case IB_QPT_RC:
-	case IB_QPT_UD:
-	case IB_QPT_SMI:
-	case IB_QPT_GSI:
-		sz = sizeof(struct ipath_sge) *
-			init_attr->cap.max_send_sge +
-			sizeof(struct ipath_swqe);
-		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
-		if (swq == NULL) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail;
-		}
-		sz = sizeof(*qp);
-		sg_list_sz = 0;
-		if (init_attr->srq) {
-			struct ipath_srq *srq = to_isrq(init_attr->srq);
-
-			if (srq->rq.max_sge > 1)
-				sg_list_sz = sizeof(*qp->r_sg_list) *
-					(srq->rq.max_sge - 1);
-		} else if (init_attr->cap.max_recv_sge > 1)
-			sg_list_sz = sizeof(*qp->r_sg_list) *
-				(init_attr->cap.max_recv_sge - 1);
-		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
-		if (!qp) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_swq;
-		}
-		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
-		    init_attr->qp_type == IB_QPT_SMI ||
-		    init_attr->qp_type == IB_QPT_GSI)) {
-			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
-			if (!qp->r_ud_sg_list) {
-				ret = ERR_PTR(-ENOMEM);
-				goto bail_qp;
-			}
-		} else
-			qp->r_ud_sg_list = NULL;
-		if (init_attr->srq) {
-			sz = 0;
-			qp->r_rq.size = 0;
-			qp->r_rq.max_sge = 0;
-			qp->r_rq.wq = NULL;
-			init_attr->cap.max_recv_wr = 0;
-			init_attr->cap.max_recv_sge = 0;
-		} else {
-			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
-			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
-			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
-				sizeof(struct ipath_rwqe);
-			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
-					      qp->r_rq.size * sz);
-			if (!qp->r_rq.wq) {
-				ret = ERR_PTR(-ENOMEM);
-				goto bail_sg_list;
-			}
-		}
-
-		/*
-		 * ib_create_qp() will initialize qp->ibqp
-		 * except for qp->ibqp.qp_num.
-		 */
-		spin_lock_init(&qp->s_lock);
-		spin_lock_init(&qp->r_rq.lock);
-		atomic_set(&qp->refcount, 0);
-		init_waitqueue_head(&qp->wait);
-		init_waitqueue_head(&qp->wait_dma);
-		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
-		INIT_LIST_HEAD(&qp->piowait);
-		INIT_LIST_HEAD(&qp->timerwait);
-		qp->state = IB_QPS_RESET;
-		qp->s_wq = swq;
-		qp->s_size = init_attr->cap.max_send_wr + 1;
-		qp->s_max_sge = init_attr->cap.max_send_sge;
-		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
-			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
-		else
-			qp->s_flags = 0;
-		dev = to_idev(ibpd->device);
-		err = ipath_alloc_qpn(&dev->qp_table, qp,
-				      init_attr->qp_type);
-		if (err) {
-			ret = ERR_PTR(err);
-			vfree(qp->r_rq.wq);
-			goto bail_sg_list;
-		}
-		qp->ip = NULL;
-		qp->s_tx = NULL;
-		ipath_reset_qp(qp, init_attr->qp_type);
-		break;
-
-	default:
-		/* Don't support raw QPs */
-		ret = ERR_PTR(-ENOSYS);
-		goto bail;
-	}
-
-	init_attr->cap.max_inline_data = 0;
-
-	/*
-	 * Return the address of the RWQ as the offset to mmap.
-	 * See ipath_mmap() for details.
-	 */
-	if (udata && udata->outlen >= sizeof(__u64)) {
-		if (!qp->r_rq.wq) {
-			__u64 offset = 0;
-
-			err = ib_copy_to_udata(udata, &offset,
-					       sizeof(offset));
-			if (err) {
-				ret = ERR_PTR(err);
-				goto bail_ip;
-			}
-		} else {
-			u32 s = sizeof(struct ipath_rwq) +
-				qp->r_rq.size * sz;
-
-			qp->ip =
-			    ipath_create_mmap_info(dev, s,
-						   ibpd->uobject->context,
-						   qp->r_rq.wq);
-			if (!qp->ip) {
-				ret = ERR_PTR(-ENOMEM);
-				goto bail_ip;
-			}
-
-			err = ib_copy_to_udata(udata, &(qp->ip->offset),
-					       sizeof(qp->ip->offset));
-			if (err) {
-				ret = ERR_PTR(err);
-				goto bail_ip;
-			}
-		}
-	}
-
-	spin_lock(&dev->n_qps_lock);
-	if (dev->n_qps_allocated == ib_ipath_max_qps) {
-		spin_unlock(&dev->n_qps_lock);
-		ret = ERR_PTR(-ENOMEM);
-		goto bail_ip;
-	}
-
-	dev->n_qps_allocated++;
-	spin_unlock(&dev->n_qps_lock);
-
-	if (qp->ip) {
-		spin_lock_irq(&dev->pending_lock);
-		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
-		spin_unlock_irq(&dev->pending_lock);
-	}
-
-	ret = &qp->ibqp;
-	goto bail;
-
-bail_ip:
-	if (qp->ip)
-		kref_put(&qp->ip->ref, ipath_release_mmap_info);
-	else
-		vfree(qp->r_rq.wq);
-	ipath_free_qp(&dev->qp_table, qp);
-	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
-bail_sg_list:
-	kfree(qp->r_ud_sg_list);
-bail_qp:
-	kfree(qp);
-bail_swq:
-	vfree(swq);
-bail:
-	return ret;
-}
-
-/**
- * ipath_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int ipath_destroy_qp(struct ib_qp *ibqp)
-{
-	struct ipath_qp *qp = to_iqp(ibqp);
-	struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-	/* Make sure HW and driver activity is stopped. */
-	spin_lock_irq(&qp->s_lock);
-	if (qp->state != IB_QPS_RESET) {
-		qp->state = IB_QPS_RESET;
-		spin_lock(&dev->pending_lock);
-		if (!list_empty(&qp->timerwait))
-			list_del_init(&qp->timerwait);
-		if (!list_empty(&qp->piowait))
-			list_del_init(&qp->piowait);
-		spin_unlock(&dev->pending_lock);
-		qp->s_flags &= ~IPATH_S_ANY_WAIT;
-		spin_unlock_irq(&qp->s_lock);
-		/* Stop the sending tasklet */
-		tasklet_kill(&qp->s_task);
-		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-	} else
-		spin_unlock_irq(&qp->s_lock);
-
-	ipath_free_qp(&dev->qp_table, qp);
-
-	if (qp->s_tx) {
-		atomic_dec(&qp->refcount);
-		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
-			kfree(qp->s_tx->txreq.map_addr);
-		spin_lock_irq(&dev->pending_lock);
-		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
-		spin_unlock_irq(&dev->pending_lock);
-		qp->s_tx = NULL;
-	}
-
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
-
-	/* all users cleaned up, mark it available */
-	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
-	spin_lock(&dev->n_qps_lock);
-	dev->n_qps_allocated--;
-	spin_unlock(&dev->n_qps_lock);
-
-	if (qp->ip)
-		kref_put(&qp->ip->ref, ipath_release_mmap_info);
-	else
-		vfree(qp->r_rq.wq);
-	kfree(qp->r_ud_sg_list);
-	vfree(qp->s_wq);
-	kfree(qp);
-	return 0;
-}
-
-/**
- * ipath_init_qp_table - initialize the QP table for a device
- * @idev: the device whose QP table we're initializing
- * @size: the size of the QP table
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
-{
-	int i;
-	int ret;
-
-	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
-	idev->qp_table.max = size;
-	idev->qp_table.nmaps = 1;
-	idev->qp_table.table = kcalloc(size, sizeof(*idev->qp_table.table),
-				       GFP_KERNEL);
-	if (idev->qp_table.table == NULL) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
-		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
-		idev->qp_table.map[i].page = NULL;
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_get_credit - flush the send work queue of a QP
- * @qp: the QP whose send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
-{
-	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
-
-	/*
-	 * If the credit is invalid, we can send
-	 * as many packets as we like.  Otherwise, we have to
-	 * honor the credit field.
-	 */
-	if (credit == IPATH_AETH_CREDIT_INVAL)
-		qp->s_lsn = (u32) -1;
-	else if (qp->s_lsn != (u32) -1) {
-		/* Compute new LSN (i.e., MSN + credit) */
-		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
-		if (ipath_cmp24(credit, qp->s_lsn) > 0)
-			qp->s_lsn = credit;
-	}
-
-	/* Restart sending if it was blocked due to lack of credits. */
-	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
-	    qp->s_cur != qp->s_head &&
-	    (qp->s_lsn == (u32) -1 ||
-	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
-			 qp->s_lsn + 1) <= 0))
-		ipath_schedule_send(qp);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c
deleted file mode 100644
index d4aa535..0000000
--- a/drivers/staging/rdma/ipath/ipath_rc.c
+++ /dev/null
@@ -1,1969 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
-		       u32 psn, u32 pmtu)
-{
-	u32 len;
-
-	len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
-	ss->sge = wqe->sg_list[0];
-	ss->sg_list = wqe->sg_list + 1;
-	ss->num_sge = wqe->wr.num_sge;
-	ipath_skip_sge(ss, len);
-	return wqe->length - len;
-}
-
-/**
- * ipath_init_restart - initialize the qp->s_sge after a restart
- * @qp: the QP whose SGE we're restarting
- * @wqe: the work queue to initialize the QP's SGE from
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
-{
-	struct ipath_ibdev *dev;
-
-	qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
-				ib_mtu_enum_to_int(qp->path_mtu));
-	dev = to_idev(qp->ibqp.device);
-	spin_lock(&dev->pending_lock);
-	if (list_empty(&qp->timerwait))
-		list_add_tail(&qp->timerwait,
-			      &dev->pending[dev->pending_index]);
-	spin_unlock(&dev->pending_lock);
-}
-
-/**
- * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are in the responder's side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
-			     struct ipath_other_headers *ohdr, u32 pmtu)
-{
-	struct ipath_ack_entry *e;
-	u32 hwords;
-	u32 len;
-	u32 bth0;
-	u32 bth2;
-
-	/* Don't send an ACK if we aren't supposed to. */
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-		goto bail;
-
-	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
-	hwords = 5;
-
-	switch (qp->s_ack_state) {
-	case OP(RDMA_READ_RESPONSE_LAST):
-	case OP(RDMA_READ_RESPONSE_ONLY):
-	case OP(ATOMIC_ACKNOWLEDGE):
-		/*
-		 * We can increment the tail pointer now that the last
-		 * response has been sent instead of only being
-		 * constructed.
-		 */
-		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-			qp->s_tail_ack_queue = 0;
-		/* FALLTHROUGH */
-	case OP(SEND_ONLY):
-	case OP(ACKNOWLEDGE):
-		/* Check for no next entry in the queue. */
-		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
-			if (qp->s_flags & IPATH_S_ACK_PENDING)
-				goto normal;
-			qp->s_ack_state = OP(ACKNOWLEDGE);
-			goto bail;
-		}
-
-		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
-		if (e->opcode == OP(RDMA_READ_REQUEST)) {
-			/* Copy SGE state in case we need to resend */
-			qp->s_ack_rdma_sge = e->rdma_sge;
-			qp->s_cur_sge = &qp->s_ack_rdma_sge;
-			len = e->rdma_sge.sge.sge_length;
-			if (len > pmtu) {
-				len = pmtu;
-				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-			} else {
-				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-				e->sent = 1;
-			}
-			ohdr->u.aeth = ipath_compute_aeth(qp);
-			hwords++;
-			qp->s_ack_rdma_psn = e->psn;
-			bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
-		} else {
-			/* COMPARE_SWAP or FETCH_ADD */
-			qp->s_cur_sge = NULL;
-			len = 0;
-			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
-			ohdr->u.at.aeth = ipath_compute_aeth(qp);
-			ohdr->u.at.atomic_ack_eth[0] =
-				cpu_to_be32(e->atomic_data >> 32);
-			ohdr->u.at.atomic_ack_eth[1] =
-				cpu_to_be32(e->atomic_data);
-			hwords += sizeof(ohdr->u.at) / sizeof(u32);
-			bth2 = e->psn;
-			e->sent = 1;
-		}
-		bth0 = qp->s_ack_state << 24;
-		break;
-
-	case OP(RDMA_READ_RESPONSE_FIRST):
-		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-		/* FALLTHROUGH */
-	case OP(RDMA_READ_RESPONSE_MIDDLE):
-		len = qp->s_ack_rdma_sge.sge.sge_length;
-		if (len > pmtu)
-			len = pmtu;
-		else {
-			ohdr->u.aeth = ipath_compute_aeth(qp);
-			hwords++;
-			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-			qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
-		}
-		bth0 = qp->s_ack_state << 24;
-		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
-		break;
-
-	default:
-	normal:
-		/*
-		 * Send a regular ACK.
-		 * Set the s_ack_state so we wait until after sending
-		 * the ACK before setting s_ack_state to ACKNOWLEDGE
-		 * (see above).
-		 */
-		qp->s_ack_state = OP(SEND_ONLY);
-		qp->s_flags &= ~IPATH_S_ACK_PENDING;
-		qp->s_cur_sge = NULL;
-		if (qp->s_nak_state)
-			ohdr->u.aeth =
-				cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
-					    (qp->s_nak_state <<
-					     IPATH_AETH_CREDIT_SHIFT));
-		else
-			ohdr->u.aeth = ipath_compute_aeth(qp);
-		hwords++;
-		len = 0;
-		bth0 = OP(ACKNOWLEDGE) << 24;
-		bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
-	}
-	qp->s_hdrwords = hwords;
-	qp->s_cur_size = len;
-	ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
-	return 1;
-
-bail:
-	return 0;
-}
-
-/**
- * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_rc_req(struct ipath_qp *qp)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_other_headers *ohdr;
-	struct ipath_sge_state *ss;
-	struct ipath_swqe *wqe;
-	u32 hwords;
-	u32 len;
-	u32 bth0;
-	u32 bth2;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-	char newreq;
-	unsigned long flags;
-	int ret = 0;
-
-	ohdr = &qp->s_hdr.u.oth;
-	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
-
-	/*
-	 * The lock is needed to synchronize between the sending tasklet,
-	 * the receive interrupt handler, and timeout resends.
-	 */
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	/* Sending responses has higher priority over sending requests. */
-	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
-	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
-	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
-	    ipath_make_rc_ack(dev, qp, ohdr, pmtu))
-		goto done;
-
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-		if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-			goto bail;
-		/* We are in the error state, flush the work request. */
-		if (qp->s_last == qp->s_head)
-			goto bail;
-		/* If DMAs are in progress, we can't flush immediately. */
-		if (atomic_read(&qp->s_dma_busy)) {
-			qp->s_flags |= IPATH_S_WAIT_DMA;
-			goto bail;
-		}
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-		goto done;
-	}
-
-	/* Leave BUSY set until RNR timeout. */
-	if (qp->s_rnr_timeout) {
-		qp->s_flags |= IPATH_S_WAITING;
-		goto bail;
-	}
-
-	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
-	hwords = 5;
-	bth0 = 1 << 22; /* Set M bit */
-
-	/* Send a request. */
-	wqe = get_swqe_ptr(qp, qp->s_cur);
-	switch (qp->s_state) {
-	default:
-		if (!(ib_ipath_state_ops[qp->state] &
-		    IPATH_PROCESS_NEXT_SEND_OK))
-			goto bail;
-		/*
-		 * Resend an old request or start a new one.
-		 *
-		 * We keep track of the current SWQE so that
-		 * we don't reset the "furthest progress" state
-		 * if we need to back up.
-		 */
-		newreq = 0;
-		if (qp->s_cur == qp->s_tail) {
-			/* Check if send work queue is empty. */
-			if (qp->s_tail == qp->s_head)
-				goto bail;
-			/*
-			 * If a fence is requested, wait for previous
-			 * RDMA read and atomic operations to finish.
-			 */
-			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
-			    qp->s_num_rd_atomic) {
-				qp->s_flags |= IPATH_S_FENCE_PENDING;
-				goto bail;
-			}
-			wqe->psn = qp->s_next_psn;
-			newreq = 1;
-		}
-		/*
-		 * Note that we have to be careful not to modify the
-		 * original work request since we may need to resend
-		 * it.
-		 */
-		len = wqe->length;
-		ss = &qp->s_sge;
-		bth2 = 0;
-		switch (wqe->wr.opcode) {
-		case IB_WR_SEND:
-		case IB_WR_SEND_WITH_IMM:
-			/* If no credit, return. */
-			if (qp->s_lsn != (u32) -1 &&
-			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
-				goto bail;
-			}
-			wqe->lpsn = wqe->psn;
-			if (len > pmtu) {
-				wqe->lpsn += (len - 1) / pmtu;
-				qp->s_state = OP(SEND_FIRST);
-				len = pmtu;
-				break;
-			}
-			if (wqe->wr.opcode == IB_WR_SEND)
-				qp->s_state = OP(SEND_ONLY);
-			else {
-				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
-				/* Immediate data comes after the BTH */
-				ohdr->u.imm_data = wqe->wr.ex.imm_data;
-				hwords += 1;
-			}
-			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-				bth0 |= 1 << 23;
-			bth2 = 1 << 31;	/* Request ACK. */
-			if (++qp->s_cur == qp->s_size)
-				qp->s_cur = 0;
-			break;
-
-		case IB_WR_RDMA_WRITE:
-			if (newreq && qp->s_lsn != (u32) -1)
-				qp->s_lsn++;
-			/* FALLTHROUGH */
-		case IB_WR_RDMA_WRITE_WITH_IMM:
-			/* If no credit, return. */
-			if (qp->s_lsn != (u32) -1 &&
-			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
-				goto bail;
-			}
-			ohdr->u.rc.reth.vaddr =
-				cpu_to_be64(wqe->rdma_wr.remote_addr);
-			ohdr->u.rc.reth.rkey =
-				cpu_to_be32(wqe->rdma_wr.rkey);
-			ohdr->u.rc.reth.length = cpu_to_be32(len);
-			hwords += sizeof(struct ib_reth) / sizeof(u32);
-			wqe->lpsn = wqe->psn;
-			if (len > pmtu) {
-				wqe->lpsn += (len - 1) / pmtu;
-				qp->s_state = OP(RDMA_WRITE_FIRST);
-				len = pmtu;
-				break;
-			}
-			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-				qp->s_state = OP(RDMA_WRITE_ONLY);
-			else {
-				qp->s_state =
-					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-				/* Immediate data comes after RETH */
-				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
-				hwords += 1;
-				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-					bth0 |= 1 << 23;
-			}
-			bth2 = 1 << 31;	/* Request ACK. */
-			if (++qp->s_cur == qp->s_size)
-				qp->s_cur = 0;
-			break;
-
-		case IB_WR_RDMA_READ:
-			/*
-			 * Don't allow more operations to be started
-			 * than the QP limits allow.
-			 */
-			if (newreq) {
-				if (qp->s_num_rd_atomic >=
-				    qp->s_max_rd_atomic) {
-					qp->s_flags |= IPATH_S_RDMAR_PENDING;
-					goto bail;
-				}
-				qp->s_num_rd_atomic++;
-				if (qp->s_lsn != (u32) -1)
-					qp->s_lsn++;
-				/*
-				 * Adjust s_next_psn to count the
-				 * expected number of responses.
-				 */
-				if (len > pmtu)
-					qp->s_next_psn += (len - 1) / pmtu;
-				wqe->lpsn = qp->s_next_psn++;
-			}
-			ohdr->u.rc.reth.vaddr =
-				cpu_to_be64(wqe->rdma_wr.remote_addr);
-			ohdr->u.rc.reth.rkey =
-				cpu_to_be32(wqe->rdma_wr.rkey);
-			ohdr->u.rc.reth.length = cpu_to_be32(len);
-			qp->s_state = OP(RDMA_READ_REQUEST);
-			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
-			ss = NULL;
-			len = 0;
-			if (++qp->s_cur == qp->s_size)
-				qp->s_cur = 0;
-			break;
-
-		case IB_WR_ATOMIC_CMP_AND_SWP:
-		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			/*
-			 * Don't allow more operations to be started
-			 * than the QP limits allow.
-			 */
-			if (newreq) {
-				if (qp->s_num_rd_atomic >=
-				    qp->s_max_rd_atomic) {
-					qp->s_flags |= IPATH_S_RDMAR_PENDING;
-					goto bail;
-				}
-				qp->s_num_rd_atomic++;
-				if (qp->s_lsn != (u32) -1)
-					qp->s_lsn++;
-				wqe->lpsn = wqe->psn;
-			}
-			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-				qp->s_state = OP(COMPARE_SWAP);
-				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
-					wqe->atomic_wr.swap);
-				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
-					wqe->atomic_wr.compare_add);
-			} else {
-				qp->s_state = OP(FETCH_ADD);
-				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
-					wqe->atomic_wr.compare_add);
-				ohdr->u.atomic_eth.compare_data = 0;
-			}
-			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
-				wqe->atomic_wr.remote_addr >> 32);
-			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
-				wqe->atomic_wr.remote_addr);
-			ohdr->u.atomic_eth.rkey = cpu_to_be32(
-				wqe->atomic_wr.rkey);
-			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
-			ss = NULL;
-			len = 0;
-			if (++qp->s_cur == qp->s_size)
-				qp->s_cur = 0;
-			break;
-
-		default:
-			goto bail;
-		}
-		qp->s_sge.sge = wqe->sg_list[0];
-		qp->s_sge.sg_list = wqe->sg_list + 1;
-		qp->s_sge.num_sge = wqe->wr.num_sge;
-		qp->s_len = wqe->length;
-		if (newreq) {
-			qp->s_tail++;
-			if (qp->s_tail >= qp->s_size)
-				qp->s_tail = 0;
-		}
-		bth2 |= qp->s_psn & IPATH_PSN_MASK;
-		if (wqe->wr.opcode == IB_WR_RDMA_READ)
-			qp->s_psn = wqe->lpsn + 1;
-		else {
-			qp->s_psn++;
-			if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-				qp->s_next_psn = qp->s_psn;
-		}
-		/*
-		 * Put the QP on the pending list so lost ACKs will cause
-		 * a retry.  More than one request can be pending so the
-		 * QP may already be on the dev->pending list.
-		 */
-		spin_lock(&dev->pending_lock);
-		if (list_empty(&qp->timerwait))
-			list_add_tail(&qp->timerwait,
-				      &dev->pending[dev->pending_index]);
-		spin_unlock(&dev->pending_lock);
-		break;
-
-	case OP(RDMA_READ_RESPONSE_FIRST):
-		/*
-		 * This case can only happen if a send is restarted.
-		 * See ipath_restart_rc().
-		 */
-		ipath_init_restart(qp, wqe);
-		/* FALLTHROUGH */
-	case OP(SEND_FIRST):
-		qp->s_state = OP(SEND_MIDDLE);
-		/* FALLTHROUGH */
-	case OP(SEND_MIDDLE):
-		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-			qp->s_next_psn = qp->s_psn;
-		ss = &qp->s_sge;
-		len = qp->s_len;
-		if (len > pmtu) {
-			len = pmtu;
-			break;
-		}
-		if (wqe->wr.opcode == IB_WR_SEND)
-			qp->s_state = OP(SEND_LAST);
-		else {
-			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
-			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.ex.imm_data;
-			hwords += 1;
-		}
-		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-			bth0 |= 1 << 23;
-		bth2 |= 1 << 31;	/* Request ACK. */
-		qp->s_cur++;
-		if (qp->s_cur >= qp->s_size)
-			qp->s_cur = 0;
-		break;
-
-	case OP(RDMA_READ_RESPONSE_LAST):
-		/*
-		 * This case can only happen if an RDMA write is restarted.
-		 * See ipath_restart_rc().
-		 */
-		ipath_init_restart(qp, wqe);
-		/* FALLTHROUGH */
-	case OP(RDMA_WRITE_FIRST):
-		qp->s_state = OP(RDMA_WRITE_MIDDLE);
-		/* FALLTHROUGH */
-	case OP(RDMA_WRITE_MIDDLE):
-		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-			qp->s_next_psn = qp->s_psn;
-		ss = &qp->s_sge;
-		len = qp->s_len;
-		if (len > pmtu) {
-			len = pmtu;
-			break;
-		}
-		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-			qp->s_state = OP(RDMA_WRITE_LAST);
-		else {
-			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
-			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.ex.imm_data;
-			hwords += 1;
-			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-				bth0 |= 1 << 23;
-		}
-		bth2 |= 1 << 31;	/* Request ACK. */
-		qp->s_cur++;
-		if (qp->s_cur >= qp->s_size)
-			qp->s_cur = 0;
-		break;
-
-	case OP(RDMA_READ_RESPONSE_MIDDLE):
-		/*
-		 * This case can only happen if an RDMA read is restarted.
-		 * See ipath_restart_rc().
-		 */
-		ipath_init_restart(qp, wqe);
-		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
-		ohdr->u.rc.reth.vaddr =
-			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
-		ohdr->u.rc.reth.rkey =
-			cpu_to_be32(wqe->rdma_wr.rkey);
-		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
-		qp->s_state = OP(RDMA_READ_REQUEST);
-		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
-		bth2 = qp->s_psn & IPATH_PSN_MASK;
-		qp->s_psn = wqe->lpsn + 1;
-		ss = NULL;
-		len = 0;
-		qp->s_cur++;
-		if (qp->s_cur == qp->s_size)
-			qp->s_cur = 0;
-		break;
-	}
-	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
-		bth2 |= 1 << 31;	/* Request ACK. */
-	qp->s_len -= len;
-	qp->s_hdrwords = hwords;
-	qp->s_cur_sge = ss;
-	qp->s_cur_size = len;
-	ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
-done:
-	ret = 1;
-	goto unlock;
-
-bail:
-	qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	return ret;
-}
-
-/**
- * send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
- *
- * This is called from ipath_rc_rcv() and only uses the receive
- * side QP state.
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-static void send_rc_ack(struct ipath_qp *qp)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_devdata *dd;
-	u16 lrh0;
-	u32 bth0;
-	u32 hwords;
-	u32 __iomem *piobuf;
-	struct ipath_ib_header hdr;
-	struct ipath_other_headers *ohdr;
-	unsigned long flags;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
-	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
-	    qp->s_ack_state != OP(ACKNOWLEDGE))
-		goto queue_ack;
-
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-	/* Don't try to send ACKs if the link isn't ACTIVE */
-	dd = dev->dd;
-	if (!(dd->ipath_flags & IPATH_LINKACTIVE))
-		goto done;
-
-	piobuf = ipath_getpiobuf(dd, 0, NULL);
-	if (!piobuf) {
-		/*
-		 * We are out of PIO buffers at the moment.
-		 * Pass responsibility for sending the ACK to the
-		 * send tasklet so that when a PIO buffer becomes
-		 * available, the ACK is sent ahead of other outgoing
-		 * packets.
-		 */
-		spin_lock_irqsave(&qp->s_lock, flags);
-		goto queue_ack;
-	}
-
-	/* Construct the header. */
-	ohdr = &hdr.u.oth;
-	lrh0 = IPATH_LRH_BTH;
-	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
-	hwords = 6;
-	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
-					 &qp->remote_ah_attr.grh,
-					 hwords, 0);
-		ohdr = &hdr.u.l.oth;
-		lrh0 = IPATH_LRH_GRH;
-	}
-	/* read pkey_index w/o lock (it's atomic) */
-	bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
-		(OP(ACKNOWLEDGE) << 24) | (1 << 22);
-	if (qp->r_nak_state)
-		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
-					    (qp->r_nak_state <<
-					     IPATH_AETH_CREDIT_SHIFT));
-	else
-		ohdr->u.aeth = ipath_compute_aeth(qp);
-	lrh0 |= qp->remote_ah_attr.sl << 4;
-	hdr.lrh[0] = cpu_to_be16(lrh0);
-	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
-				 qp->remote_ah_attr.src_path_bits);
-	ohdr->bth[0] = cpu_to_be32(bth0);
-	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
-
-	writeq(hwords + 1, piobuf);
-
-	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
-		u32 *hdrp = (u32 *) &hdr;
-
-		ipath_flush_wc();
-		__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
-		ipath_flush_wc();
-		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
-	} else
-		__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
-
-	ipath_flush_wc();
-
-	dev->n_unicast_xmit++;
-	goto done;
-
-queue_ack:
-	if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) {
-		dev->n_rc_qacks++;
-		qp->s_flags |= IPATH_S_ACK_PENDING;
-		qp->s_nak_state = qp->r_nak_state;
-		qp->s_ack_psn = qp->r_ack_psn;
-
-		/* Schedule the send tasklet. */
-		ipath_schedule_send(qp);
-	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-	return;
-}
-
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
-	u32 n = qp->s_last;
-	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
-	u32 opcode;
-
-	qp->s_cur = n;
-
-	/*
-	 * If we are starting the request from the beginning,
-	 * let the normal send code handle initialization.
-	 */
-	if (ipath_cmp24(psn, wqe->psn) <= 0) {
-		qp->s_state = OP(SEND_LAST);
-		goto done;
-	}
-
-	/* Find the work request opcode corresponding to the given PSN. */
-	opcode = wqe->wr.opcode;
-	for (;;) {
-		int diff;
-
-		if (++n == qp->s_size)
-			n = 0;
-		if (n == qp->s_tail)
-			break;
-		wqe = get_swqe_ptr(qp, n);
-		diff = ipath_cmp24(psn, wqe->psn);
-		if (diff < 0)
-			break;
-		qp->s_cur = n;
-		/*
-		 * If we are starting the request from the beginning,
-		 * let the normal send code handle initialization.
-		 */
-		if (diff == 0) {
-			qp->s_state = OP(SEND_LAST);
-			goto done;
-		}
-		opcode = wqe->wr.opcode;
-	}
-
-	/*
-	 * Set the state to restart in the middle of a request.
-	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
-	 * See ipath_make_rc_req().
-	 */
-	switch (opcode) {
-	case IB_WR_SEND:
-	case IB_WR_SEND_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-		break;
-
-	case IB_WR_RDMA_WRITE:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-		break;
-
-	case IB_WR_RDMA_READ:
-		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-		break;
-
-	default:
-		/*
-		 * This case shouldn't happen since there is
-		 * only one PSN per request.
-		 */
-		qp->s_state = OP(SEND_LAST);
-	}
-done:
-	qp->s_psn = psn;
-}
-
-/**
- * ipath_restart_rc - back up requester to resend the last un-ACKed request
- * @qp: the QP to restart
- * @psn: packet sequence number for the request
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
-{
-	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-	struct ipath_ibdev *dev;
-
-	if (qp->s_retry == 0) {
-		ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
-		ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-		goto bail;
-	}
-	qp->s_retry--;
-
-	/*
-	 * Remove the QP from the timeout queue.
-	 * Note: it may already have been removed by ipath_ib_timer().
-	 */
-	dev = to_idev(qp->ibqp.device);
-	spin_lock(&dev->pending_lock);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	if (wqe->wr.opcode == IB_WR_RDMA_READ)
-		dev->n_rc_resends++;
-	else
-		dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
-
-	reset_psn(qp, psn);
-	ipath_schedule_send(qp);
-
-bail:
-	return;
-}
-
-static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
-{
-	qp->s_last_psn = psn;
-}
-
-/**
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the request that resulted in the ACK
- *
- * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held and interrupts disabled.
- * Returns 1 if OK, 0 if current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
-		     u64 val)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ib_wc wc;
-	enum ib_wc_status status;
-	struct ipath_swqe *wqe;
-	int ret = 0;
-	u32 ack_psn;
-	int diff;
-
-	/*
-	 * Remove the QP from the timeout queue (or RNR timeout queue).
-	 * If ipath_ib_timer() has already removed it,
-	 * it's OK since we hold the QP s_lock and ipath_restart_rc()
-	 * just won't find anything to restart if we ACK everything.
-	 */
-	spin_lock(&dev->pending_lock);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	spin_unlock(&dev->pending_lock);
-
-	/*
-	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
-	 * requests and implicitly NAK RDMA read and atomic requests issued
-	 * before the NAK'ed request.  The MSN won't include the NAK'ed
-	 * request but will include an ACK'ed request(s).
-	 */
-	ack_psn = psn;
-	if (aeth >> 29)
-		ack_psn--;
-	wqe = get_swqe_ptr(qp, qp->s_last);
-
-	/*
-	 * The MSN might be for a later WQE than the PSN indicates so
-	 * only complete WQEs that the PSN finishes.
-	 */
-	while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
-		/*
-		 * RDMA_READ_RESPONSE_ONLY is a special case since
-		 * we want to generate completion events for everything
-		 * before the RDMA read, copy the data, then generate
-		 * the completion for the read.
-		 */
-		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
-		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
-		    diff == 0) {
-			ret = 1;
-			goto bail;
-		}
-		/*
-		 * If this request is an RDMA read or atomic, and the ACK is
-		 * for a later operation, this ACK NAKs the RDMA read or
-		 * atomic.  In other words, only an RDMA_READ_LAST or ONLY
-		 * can ACK an RDMA read and likewise for atomic ops.  Note
-		 * that the NAK case can only happen if relaxed ordering is
-		 * used and requests are sent after an RDMA read or atomic
-		 * is sent but before the response is received.
-		 */
-		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
-		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
-		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
-		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
-			/*
-			 * The last valid PSN seen is the previous
-			 * request's.
-			 */
-			update_last_psn(qp, wqe->psn - 1);
-			/* Retry this request. */
-			ipath_restart_rc(qp, wqe->psn);
-			/*
-			 * No need to process the ACK/NAK since we are
-			 * restarting an earlier request.
-			 */
-			goto bail;
-		}
-		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-			*(u64 *) wqe->sg_list[0].vaddr = val;
-		if (qp->s_num_rd_atomic &&
-		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
-		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
-			qp->s_num_rd_atomic--;
-			/* Restart sending task if fence is complete */
-			if (((qp->s_flags & IPATH_S_FENCE_PENDING) &&
-			     !qp->s_num_rd_atomic) ||
-			    qp->s_flags & IPATH_S_RDMAR_PENDING)
-				ipath_schedule_send(qp);
-		}
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof wc);
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			wc.src_qp = qp->remote_qpn;
-			wc.slid = qp->remote_ah_attr.dlid;
-			wc.sl = qp->remote_ah_attr.sl;
-			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
-		}
-		qp->s_retry = qp->s_retry_cnt;
-		/*
-		 * If we are completing a request which is in the process of
-		 * being resent, we can stop resending it since we know the
-		 * responder has already seen it.
-		 */
-		if (qp->s_last == qp->s_cur) {
-			if (++qp->s_cur >= qp->s_size)
-				qp->s_cur = 0;
-			qp->s_last = qp->s_cur;
-			if (qp->s_last == qp->s_tail)
-				break;
-			wqe = get_swqe_ptr(qp, qp->s_cur);
-			qp->s_state = OP(SEND_LAST);
-			qp->s_psn = wqe->psn;
-		} else {
-			if (++qp->s_last >= qp->s_size)
-				qp->s_last = 0;
-			if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur)
-				qp->s_draining = 0;
-			if (qp->s_last == qp->s_tail)
-				break;
-			wqe = get_swqe_ptr(qp, qp->s_last);
-		}
-	}
-
-	switch (aeth >> 29) {
-	case 0:		/* ACK */
-		dev->n_rc_acks++;
-		/* If this is a partial ACK, reset the retransmit timer. */
-		if (qp->s_last != qp->s_tail) {
-			spin_lock(&dev->pending_lock);
-			if (list_empty(&qp->timerwait))
-				list_add_tail(&qp->timerwait,
-					&dev->pending[dev->pending_index]);
-			spin_unlock(&dev->pending_lock);
-			/*
-			 * If we get a partial ACK for a resent operation,
-			 * we can stop resending the earlier packets and
-			 * continue with the next packet the receiver wants.
-			 */
-			if (ipath_cmp24(qp->s_psn, psn) <= 0) {
-				reset_psn(qp, psn + 1);
-				ipath_schedule_send(qp);
-			}
-		} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
-			qp->s_state = OP(SEND_LAST);
-			qp->s_psn = psn + 1;
-		}
-		ipath_get_credit(qp, aeth);
-		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
-		qp->s_retry = qp->s_retry_cnt;
-		update_last_psn(qp, psn);
-		ret = 1;
-		goto bail;
-
-	case 1:		/* RNR NAK */
-		dev->n_rnr_naks++;
-		if (qp->s_last == qp->s_tail)
-			goto bail;
-		if (qp->s_rnr_retry == 0) {
-			status = IB_WC_RNR_RETRY_EXC_ERR;
-			goto class_b;
-		}
-		if (qp->s_rnr_retry_cnt < 7)
-			qp->s_rnr_retry--;
-
-		/* The last valid PSN is the previous PSN. */
-		update_last_psn(qp, psn - 1);
-
-		if (wqe->wr.opcode == IB_WR_RDMA_READ)
-			dev->n_rc_resends++;
-		else
-			dev->n_rc_resends +=
-				(qp->s_psn - psn) & IPATH_PSN_MASK;
-
-		reset_psn(qp, psn);
-
-		qp->s_rnr_timeout =
-			ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
-					   IPATH_AETH_CREDIT_MASK];
-		ipath_insert_rnr_queue(qp);
-		ipath_schedule_send(qp);
-		goto bail;
-
-	case 3:		/* NAK */
-		if (qp->s_last == qp->s_tail)
-			goto bail;
-		/* The last valid PSN is the previous PSN. */
-		update_last_psn(qp, psn - 1);
-		switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
-			IPATH_AETH_CREDIT_MASK) {
-		case 0:	/* PSN sequence error */
-			dev->n_seq_naks++;
-			/*
-			 * Back up to the responder's expected PSN.
-			 * Note that we might get a NAK in the middle of an
-			 * RDMA READ response which terminates the RDMA
-			 * READ.
-			 */
-			ipath_restart_rc(qp, psn);
-			break;
-
-		case 1:	/* Invalid Request */
-			status = IB_WC_REM_INV_REQ_ERR;
-			dev->n_other_naks++;
-			goto class_b;
-
-		case 2:	/* Remote Access Error */
-			status = IB_WC_REM_ACCESS_ERR;
-			dev->n_other_naks++;
-			goto class_b;
-
-		case 3:	/* Remote Operation Error */
-			status = IB_WC_REM_OP_ERR;
-			dev->n_other_naks++;
-		class_b:
-			ipath_send_complete(qp, wqe, status);
-			ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-			break;
-
-		default:
-			/* Ignore other reserved NAK error codes */
-			goto reserved;
-		}
-		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
-		goto bail;
-
-	default:		/* 2: reserved */
-	reserved:
-		/* Ignore reserved NAK codes. */
-		goto bail;
-	}
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_rc_rcv_resp - process an incoming RC response packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
-				     struct ipath_other_headers *ohdr,
-				     void *data, u32 tlen,
-				     struct ipath_qp *qp,
-				     u32 opcode,
-				     u32 psn, u32 hdrsize, u32 pmtu,
-				     int header_in_data)
-{
-	struct ipath_swqe *wqe;
-	enum ib_wc_status status;
-	unsigned long flags;
-	int diff;
-	u32 pad;
-	u32 aeth;
-	u64 val;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-		goto ack_done;
-
-	/* Ignore invalid responses. */
-	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
-		goto ack_done;
-
-	/* Ignore duplicate responses. */
-	diff = ipath_cmp24(psn, qp->s_last_psn);
-	if (unlikely(diff <= 0)) {
-		/* Update credits for "ghost" ACKs */
-		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
-			if (!header_in_data)
-				aeth = be32_to_cpu(ohdr->u.aeth);
-			else {
-				aeth = be32_to_cpu(((__be32 *) data)[0]);
-				data += sizeof(__be32);
-			}
-			if ((aeth >> 29) == 0)
-				ipath_get_credit(qp, aeth);
-		}
-		goto ack_done;
-	}
-
-	if (unlikely(qp->s_last == qp->s_tail))
-		goto ack_done;
-	wqe = get_swqe_ptr(qp, qp->s_last);
-	status = IB_WC_SUCCESS;
-
-	switch (opcode) {
-	case OP(ACKNOWLEDGE):
-	case OP(ATOMIC_ACKNOWLEDGE):
-	case OP(RDMA_READ_RESPONSE_FIRST):
-		if (!header_in_data)
-			aeth = be32_to_cpu(ohdr->u.aeth);
-		else {
-			aeth = be32_to_cpu(((__be32 *) data)[0]);
-			data += sizeof(__be32);
-		}
-		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
-			if (!header_in_data) {
-				__be32 *p = ohdr->u.at.atomic_ack_eth;
-
-				val = ((u64) be32_to_cpu(p[0]) << 32) |
-					be32_to_cpu(p[1]);
-			} else
-				val = be64_to_cpu(((__be64 *) data)[0]);
-		} else
-			val = 0;
-		if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
-		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
-			goto ack_done;
-		hdrsize += 4;
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_op_err;
-		qp->r_flags &= ~IPATH_R_RDMAR_SEQ;
-		/*
-		 * If this is a response to a resent RDMA read, we
-		 * have to be careful to copy the data to the right
-		 * location.
-		 */
-		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
-						  wqe, psn, pmtu);
-		goto read_middle;
-
-	case OP(RDMA_READ_RESPONSE_MIDDLE):
-		/* no AETH, no ACK */
-		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-			dev->n_rdma_seq++;
-			if (qp->r_flags & IPATH_R_RDMAR_SEQ)
-				goto ack_done;
-			qp->r_flags |= IPATH_R_RDMAR_SEQ;
-			ipath_restart_rc(qp, qp->s_last_psn + 1);
-			goto ack_done;
-		}
-		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_op_err;
-	read_middle:
-		if (unlikely(tlen != (hdrsize + pmtu + 4)))
-			goto ack_len_err;
-		if (unlikely(pmtu >= qp->s_rdma_read_len))
-			goto ack_len_err;
-
-		/* We got a response so update the timeout. */
-		spin_lock(&dev->pending_lock);
-		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
-			list_move_tail(&qp->timerwait,
-				       &dev->pending[dev->pending_index]);
-		spin_unlock(&dev->pending_lock);
-
-		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
-			qp->s_retry = qp->s_retry_cnt;
-
-		/*
-		 * Update the RDMA receive state but do the copy w/o
-		 * holding the locks and blocking interrupts.
-		 */
-		qp->s_rdma_read_len -= pmtu;
-		update_last_psn(qp, psn);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
-		goto bail;
-
-	case OP(RDMA_READ_RESPONSE_ONLY):
-		if (!header_in_data)
-			aeth = be32_to_cpu(ohdr->u.aeth);
-		else
-			aeth = be32_to_cpu(((__be32 *) data)[0]);
-		if (!do_rc_ack(qp, aeth, psn, opcode, 0))
-			goto ack_done;
-		/* Get the number of bytes the message was padded by. */
-		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-		/*
-		 * Check that the data size is >= 0 && <= pmtu.
-		 * Remember to account for the AETH header (4) and
-		 * ICRC (4).
-		 */
-		if (unlikely(tlen < (hdrsize + pad + 8)))
-			goto ack_len_err;
-		/*
-		 * If this is a response to a resent RDMA read, we
-		 * have to be careful to copy the data to the right
-		 * location.
-		 */
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
-						  wqe, psn, pmtu);
-		goto read_last;
-
-	case OP(RDMA_READ_RESPONSE_LAST):
-		/* ACKs READ req. */
-		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-			dev->n_rdma_seq++;
-			if (qp->r_flags & IPATH_R_RDMAR_SEQ)
-				goto ack_done;
-			qp->r_flags |= IPATH_R_RDMAR_SEQ;
-			ipath_restart_rc(qp, qp->s_last_psn + 1);
-			goto ack_done;
-		}
-		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-			goto ack_op_err;
-		/* Get the number of bytes the message was padded by. */
-		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-		/*
-		 * Check that the data size is >= 1 && <= pmtu.
-		 * Remember to account for the AETH header (4) and
-		 * ICRC (4).
-		 */
-		if (unlikely(tlen <= (hdrsize + pad + 8)))
-			goto ack_len_err;
-	read_last:
-		tlen -= hdrsize + pad + 8;
-		if (unlikely(tlen != qp->s_rdma_read_len))
-			goto ack_len_err;
-		if (!header_in_data)
-			aeth = be32_to_cpu(ohdr->u.aeth);
-		else {
-			aeth = be32_to_cpu(((__be32 *) data)[0]);
-			data += sizeof(__be32);
-		}
-		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
-		(void) do_rc_ack(qp, aeth, psn,
-				 OP(RDMA_READ_RESPONSE_LAST), 0);
-		goto ack_done;
-	}
-
-ack_op_err:
-	status = IB_WC_LOC_QP_OP_ERR;
-	goto ack_err;
-
-ack_len_err:
-	status = IB_WC_LOC_LEN_ERR;
-ack_err:
-	ipath_send_complete(qp, wqe, status);
-	ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-ack_done:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
-	return;
-}
-
-/**
- * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
-				     struct ipath_other_headers *ohdr,
-				     void *data,
-				     struct ipath_qp *qp,
-				     u32 opcode,
-				     u32 psn,
-				     int diff,
-				     int header_in_data)
-{
-	struct ipath_ack_entry *e;
-	u8 i, prev;
-	int old_req;
-	unsigned long flags;
-
-	if (diff > 0) {
-		/*
-		 * Packet sequence error.
-		 * A NAK will ACK earlier sends and RDMA writes.
-		 * Don't queue the NAK if we already sent one.
-		 */
-		if (!qp->r_nak_state) {
-			qp->r_nak_state = IB_NAK_PSN_ERROR;
-			/* Use the expected PSN. */
-			qp->r_ack_psn = qp->r_psn;
-			goto send_ack;
-		}
-		goto done;
-	}
-
-	/*
-	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
-	 * write or atomic op.  Don't NAK errors, just silently drop
-	 * the duplicate request.  Note that r_sge, r_len, and
-	 * r_rcv_len may be in use so don't modify them.
-	 *
-	 * We are supposed to ACK the earliest duplicate PSN but we
-	 * can coalesce an outstanding duplicate ACK.  We have to
-	 * send the earliest so that RDMA reads can be restarted at
-	 * the requester's expected PSN.
-	 *
-	 * First, find where this duplicate PSN falls within the
-	 * ACKs previously sent.
-	 */
-	psn &= IPATH_PSN_MASK;
-	e = NULL;
-	old_req = 1;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-		goto unlock_done;
-
-	for (i = qp->r_head_ack_queue; ; i = prev) {
-		if (i == qp->s_tail_ack_queue)
-			old_req = 0;
-		if (i)
-			prev = i - 1;
-		else
-			prev = IPATH_MAX_RDMA_ATOMIC;
-		if (prev == qp->r_head_ack_queue) {
-			e = NULL;
-			break;
-		}
-		e = &qp->s_ack_queue[prev];
-		if (!e->opcode) {
-			e = NULL;
-			break;
-		}
-		if (ipath_cmp24(psn, e->psn) >= 0) {
-			if (prev == qp->s_tail_ack_queue)
-				old_req = 0;
-			break;
-		}
-	}
-	switch (opcode) {
-	case OP(RDMA_READ_REQUEST): {
-		struct ib_reth *reth;
-		u32 offset;
-		u32 len;
-
-		/*
-		 * If we didn't find the RDMA read request in the ack queue,
-		 * or the send tasklet is already backed up to send an
-		 * earlier entry, we can ignore this request.
-		 */
-		if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
-			goto unlock_done;
-		/* RETH comes after BTH */
-		if (!header_in_data)
-			reth = &ohdr->u.rc.reth;
-		else {
-			reth = (struct ib_reth *)data;
-			data += sizeof(*reth);
-		}
-		/*
-		 * Address range must be a subset of the original
-		 * request and start on pmtu boundaries.
-		 * We reuse the old ack_queue slot since the requester
-		 * should not back up and request an earlier PSN for the
-		 * same request.
-		 */
-		offset = ((psn - e->psn) & IPATH_PSN_MASK) *
-			ib_mtu_enum_to_int(qp->path_mtu);
-		len = be32_to_cpu(reth->length);
-		if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
-			goto unlock_done;
-		if (len != 0) {
-			u32 rkey = be32_to_cpu(reth->rkey);
-			u64 vaddr = be64_to_cpu(reth->vaddr);
-			int ok;
-
-			ok = ipath_rkey_ok(qp, &e->rdma_sge,
-					   len, vaddr, rkey,
-					   IB_ACCESS_REMOTE_READ);
-			if (unlikely(!ok))
-				goto unlock_done;
-		} else {
-			e->rdma_sge.sg_list = NULL;
-			e->rdma_sge.num_sge = 0;
-			e->rdma_sge.sge.mr = NULL;
-			e->rdma_sge.sge.vaddr = NULL;
-			e->rdma_sge.sge.length = 0;
-			e->rdma_sge.sge.sge_length = 0;
-		}
-		e->psn = psn;
-		qp->s_ack_state = OP(ACKNOWLEDGE);
-		qp->s_tail_ack_queue = prev;
-		break;
-	}
-
-	case OP(COMPARE_SWAP):
-	case OP(FETCH_ADD): {
-		/*
-		 * If we didn't find the atomic request in the ack queue
-		 * or the send tasklet is already backed up to send an
-		 * earlier entry, we can ignore this request.
-		 */
-		if (!e || e->opcode != (u8) opcode || old_req)
-			goto unlock_done;
-		qp->s_ack_state = OP(ACKNOWLEDGE);
-		qp->s_tail_ack_queue = prev;
-		break;
-	}
-
-	default:
-		if (old_req)
-			goto unlock_done;
-		/*
-		 * Resend the most recent ACK if this request is
-		 * after all the previous RDMA reads and atomics.
-		 */
-		if (i == qp->r_head_ack_queue) {
-			spin_unlock_irqrestore(&qp->s_lock, flags);
-			qp->r_nak_state = 0;
-			qp->r_ack_psn = qp->r_psn - 1;
-			goto send_ack;
-		}
-		/*
-		 * Try to send a simple ACK to work around a Mellanox bug
-		 * which doesn't accept an RDMA read response or atomic
-		 * response as an ACK for earlier SENDs or RDMA writes.
-		 */
-		if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
-		    !(qp->s_flags & IPATH_S_ACK_PENDING) &&
-		    qp->s_ack_state == OP(ACKNOWLEDGE)) {
-			spin_unlock_irqrestore(&qp->s_lock, flags);
-			qp->r_nak_state = 0;
-			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
-			goto send_ack;
-		}
-		/*
-		 * Resend the RDMA read or atomic op which
-		 * ACKs this duplicate request.
-		 */
-		qp->s_ack_state = OP(ACKNOWLEDGE);
-		qp->s_tail_ack_queue = i;
-		break;
-	}
-	qp->r_nak_state = 0;
-	ipath_schedule_send(qp);
-
-unlock_done:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-	return 1;
-
-send_ack:
-	return 0;
-}
-
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
-{
-	unsigned long flags;
-	int lastwqe;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-	lastwqe = ipath_error_qp(qp, err);
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-	if (lastwqe) {
-		struct ib_event ev;
-
-		ev.device = qp->ibqp.device;
-		ev.element.qp = &qp->ibqp;
-		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-	}
-}
-
-static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
-{
-	unsigned next;
-
-	next = n + 1;
-	if (next > IPATH_MAX_RDMA_ATOMIC)
-		next = 0;
-	if (n == qp->s_tail_ack_queue) {
-		qp->s_tail_ack_queue = next;
-		qp->s_ack_state = OP(ACKNOWLEDGE);
-	}
-}
-
-/**
- * ipath_rc_rcv - process an incoming RC packet
- * @dev: the device this packet came in on
- * @hdr: the header of this packet
- * @has_grh: true if the header has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- *
- * This is called from ipath_qp_rcv() to process an incoming RC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-	struct ipath_other_headers *ohdr;
-	u32 opcode;
-	u32 hdrsize;
-	u32 psn;
-	u32 pad;
-	struct ib_wc wc;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-	int diff;
-	struct ib_reth *reth;
-	int header_in_data;
-	unsigned long flags;
-
-	/* Validate the SLID. See Ch. 9.6.1.5 */
-	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
-		goto done;
-
-	/* Check for GRH */
-	if (!has_grh) {
-		ohdr = &hdr->u.oth;
-		hdrsize = 8 + 12;	/* LRH + BTH */
-		psn = be32_to_cpu(ohdr->bth[2]);
-		header_in_data = 0;
-	} else {
-		ohdr = &hdr->u.l.oth;
-		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
-		/*
-		 * The header with GRH is 60 bytes and the core driver sets
-		 * the eager header buffer size to 56 bytes so the last 4
-		 * bytes of the BTH header (PSN) are in the data buffer.
-		 */
-		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-		if (header_in_data) {
-			psn = be32_to_cpu(((__be32 *) data)[0]);
-			data += sizeof(__be32);
-		} else
-			psn = be32_to_cpu(ohdr->bth[2]);
-	}
-
-	/*
-	 * Process responses (ACKs) before anything else.  Note that the
-	 * packet sequence number will be for something in the send work
-	 * queue rather than the expected receive packet sequence number.
-	 * In other words, this QP is the requester.
-	 */
-	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
-	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
-		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
-				  hdrsize, pmtu, header_in_data);
-		goto done;
-	}
-
-	/* Compute 24 bits worth of difference. */
-	diff = ipath_cmp24(psn, qp->r_psn);
-	if (unlikely(diff)) {
-		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
-				       psn, diff, header_in_data))
-			goto done;
-		goto send_ack;
-	}
-
-	/* Check for opcode sequence errors. */
-	switch (qp->r_state) {
-	case OP(SEND_FIRST):
-	case OP(SEND_MIDDLE):
-		if (opcode == OP(SEND_MIDDLE) ||
-		    opcode == OP(SEND_LAST) ||
-		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
-			break;
-		goto nack_inv;
-
-	case OP(RDMA_WRITE_FIRST):
-	case OP(RDMA_WRITE_MIDDLE):
-		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
-		    opcode == OP(RDMA_WRITE_LAST) ||
-		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-			break;
-		goto nack_inv;
-
-	default:
-		if (opcode == OP(SEND_MIDDLE) ||
-		    opcode == OP(SEND_LAST) ||
-		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
-		    opcode == OP(RDMA_WRITE_MIDDLE) ||
-		    opcode == OP(RDMA_WRITE_LAST) ||
-		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-			goto nack_inv;
-		/*
-		 * Note that it is up to the requester to not send a new
-		 * RDMA read or atomic operation before receiving an ACK
-		 * for the previous operation.
-		 */
-		break;
-	}
-
-	memset(&wc, 0, sizeof wc);
-
-	/* OK, process the packet. */
-	switch (opcode) {
-	case OP(SEND_FIRST):
-		if (!ipath_get_rwqe(qp, 0))
-			goto rnr_nak;
-		qp->r_rcv_len = 0;
-		/* FALLTHROUGH */
-	case OP(SEND_MIDDLE):
-	case OP(RDMA_WRITE_MIDDLE):
-	send_middle:
-		/* Check for invalid length PMTU or posted rwqe len. */
-		if (unlikely(tlen != (hdrsize + pmtu + 4)))
-			goto nack_inv;
-		qp->r_rcv_len += pmtu;
-		if (unlikely(qp->r_rcv_len > qp->r_len))
-			goto nack_inv;
-		ipath_copy_sge(&qp->r_sge, data, pmtu);
-		break;
-
-	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-		/* consume RWQE */
-		if (!ipath_get_rwqe(qp, 1))
-			goto rnr_nak;
-		goto send_last_imm;
-
-	case OP(SEND_ONLY):
-	case OP(SEND_ONLY_WITH_IMMEDIATE):
-		if (!ipath_get_rwqe(qp, 0))
-			goto rnr_nak;
-		qp->r_rcv_len = 0;
-		if (opcode == OP(SEND_ONLY))
-			goto send_last;
-		/* FALLTHROUGH */
-	case OP(SEND_LAST_WITH_IMMEDIATE):
-	send_last_imm:
-		if (header_in_data) {
-			wc.ex.imm_data = *(__be32 *) data;
-			data += sizeof(__be32);
-		} else {
-			/* Immediate data comes after BTH */
-			wc.ex.imm_data = ohdr->u.imm_data;
-		}
-		hdrsize += 4;
-		wc.wc_flags = IB_WC_WITH_IMM;
-		/* FALLTHROUGH */
-	case OP(SEND_LAST):
-	case OP(RDMA_WRITE_LAST):
-	send_last:
-		/* Get the number of bytes the message was padded by. */
-		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-		/* Check for invalid length. */
-		/* XXX LAST len should be >= 1 */
-		if (unlikely(tlen < (hdrsize + pad + 4)))
-			goto nack_inv;
-		/* Don't count the CRC. */
-		tlen -= (hdrsize + pad + 4);
-		wc.byte_len = tlen + qp->r_rcv_len;
-		if (unlikely(wc.byte_len > qp->r_len))
-			goto nack_inv;
-		ipath_copy_sge(&qp->r_sge, data, tlen);
-		qp->r_msn++;
-		if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-			break;
-		wc.wr_id = qp->r_wr_id;
-		wc.status = IB_WC_SUCCESS;
-		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
-		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-		else
-			wc.opcode = IB_WC_RECV;
-		wc.qp = &qp->ibqp;
-		wc.src_qp = qp->remote_qpn;
-		wc.slid = qp->remote_ah_attr.dlid;
-		wc.sl = qp->remote_ah_attr.sl;
-		/* Signal completion event if the solicited bit is set. */
-		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-			       (ohdr->bth[0] &
-				cpu_to_be32(1 << 23)) != 0);
-		break;
-
-	case OP(RDMA_WRITE_FIRST):
-	case OP(RDMA_WRITE_ONLY):
-	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_WRITE)))
-			goto nack_inv;
-		/* consume RWQE */
-		/* RETH comes after BTH */
-		if (!header_in_data)
-			reth = &ohdr->u.rc.reth;
-		else {
-			reth = (struct ib_reth *)data;
-			data += sizeof(*reth);
-		}
-		hdrsize += sizeof(*reth);
-		qp->r_len = be32_to_cpu(reth->length);
-		qp->r_rcv_len = 0;
-		if (qp->r_len != 0) {
-			u32 rkey = be32_to_cpu(reth->rkey);
-			u64 vaddr = be64_to_cpu(reth->vaddr);
-			int ok;
-
-			/* Check rkey & NAK */
-			ok = ipath_rkey_ok(qp, &qp->r_sge,
-					   qp->r_len, vaddr, rkey,
-					   IB_ACCESS_REMOTE_WRITE);
-			if (unlikely(!ok))
-				goto nack_acc;
-		} else {
-			qp->r_sge.sg_list = NULL;
-			qp->r_sge.sge.mr = NULL;
-			qp->r_sge.sge.vaddr = NULL;
-			qp->r_sge.sge.length = 0;
-			qp->r_sge.sge.sge_length = 0;
-		}
-		if (opcode == OP(RDMA_WRITE_FIRST))
-			goto send_middle;
-		else if (opcode == OP(RDMA_WRITE_ONLY))
-			goto send_last;
-		if (!ipath_get_rwqe(qp, 1))
-			goto rnr_nak;
-		goto send_last_imm;
-
-	case OP(RDMA_READ_REQUEST): {
-		struct ipath_ack_entry *e;
-		u32 len;
-		u8 next;
-
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_READ)))
-			goto nack_inv;
-		next = qp->r_head_ack_queue + 1;
-		if (next > IPATH_MAX_RDMA_ATOMIC)
-			next = 0;
-		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-			goto unlock;
-		if (unlikely(next == qp->s_tail_ack_queue)) {
-			if (!qp->s_ack_queue[next].sent)
-				goto nack_inv_unlck;
-			ipath_update_ack_queue(qp, next);
-		}
-		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		/* RETH comes after BTH */
-		if (!header_in_data)
-			reth = &ohdr->u.rc.reth;
-		else {
-			reth = (struct ib_reth *)data;
-			data += sizeof(*reth);
-		}
-		len = be32_to_cpu(reth->length);
-		if (len) {
-			u32 rkey = be32_to_cpu(reth->rkey);
-			u64 vaddr = be64_to_cpu(reth->vaddr);
-			int ok;
-
-			/* Check rkey & NAK */
-			ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
-					   rkey, IB_ACCESS_REMOTE_READ);
-			if (unlikely(!ok))
-				goto nack_acc_unlck;
-			/*
-			 * Update the next expected PSN.  We add 1 later
-			 * below, so only add the remainder here.
-			 */
-			if (len > pmtu)
-				qp->r_psn += (len - 1) / pmtu;
-		} else {
-			e->rdma_sge.sg_list = NULL;
-			e->rdma_sge.num_sge = 0;
-			e->rdma_sge.sge.mr = NULL;
-			e->rdma_sge.sge.vaddr = NULL;
-			e->rdma_sge.sge.length = 0;
-			e->rdma_sge.sge.sge_length = 0;
-		}
-		e->opcode = opcode;
-		e->sent = 0;
-		e->psn = psn;
-		/*
-		 * We need to increment the MSN here instead of when we
-		 * finish sending the result since a duplicate request would
-		 * increment it more than once.
-		 */
-		qp->r_msn++;
-		qp->r_psn++;
-		qp->r_state = opcode;
-		qp->r_nak_state = 0;
-		qp->r_head_ack_queue = next;
-
-		/* Schedule the send tasklet. */
-		ipath_schedule_send(qp);
-
-		goto unlock;
-	}
-
-	case OP(COMPARE_SWAP):
-	case OP(FETCH_ADD): {
-		struct ib_atomic_eth *ateth;
-		struct ipath_ack_entry *e;
-		u64 vaddr;
-		atomic64_t *maddr;
-		u64 sdata;
-		u32 rkey;
-		u8 next;
-
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_ATOMIC)))
-			goto nack_inv;
-		next = qp->r_head_ack_queue + 1;
-		if (next > IPATH_MAX_RDMA_ATOMIC)
-			next = 0;
-		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-			goto unlock;
-		if (unlikely(next == qp->s_tail_ack_queue)) {
-			if (!qp->s_ack_queue[next].sent)
-				goto nack_inv_unlck;
-			ipath_update_ack_queue(qp, next);
-		}
-		if (!header_in_data)
-			ateth = &ohdr->u.atomic_eth;
-		else
-			ateth = (struct ib_atomic_eth *)data;
-		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
-			be32_to_cpu(ateth->vaddr[1]);
-		if (unlikely(vaddr & (sizeof(u64) - 1)))
-			goto nack_inv_unlck;
-		rkey = be32_to_cpu(ateth->rkey);
-		/* Check rkey & NAK */
-		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
-					    sizeof(u64), vaddr, rkey,
-					    IB_ACCESS_REMOTE_ATOMIC)))
-			goto nack_acc_unlck;
-		/* Perform atomic OP and save result. */
-		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-		sdata = be64_to_cpu(ateth->swap_data);
-		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
-			(u64) atomic64_add_return(sdata, maddr) - sdata :
-			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-				      be64_to_cpu(ateth->compare_data),
-				      sdata);
-		e->opcode = opcode;
-		e->sent = 0;
-		e->psn = psn & IPATH_PSN_MASK;
-		qp->r_msn++;
-		qp->r_psn++;
-		qp->r_state = opcode;
-		qp->r_nak_state = 0;
-		qp->r_head_ack_queue = next;
-
-		/* Schedule the send tasklet. */
-		ipath_schedule_send(qp);
-
-		goto unlock;
-	}
-
-	default:
-		/* NAK unknown opcodes. */
-		goto nack_inv;
-	}
-	qp->r_psn++;
-	qp->r_state = opcode;
-	qp->r_ack_psn = psn;
-	qp->r_nak_state = 0;
-	/* Send an ACK if requested or required. */
-	if (psn & (1 << 31))
-		goto send_ack;
-	goto done;
-
-rnr_nak:
-	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
-	qp->r_ack_psn = qp->r_psn;
-	goto send_ack;
-
-nack_inv_unlck:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
-	ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
-	qp->r_ack_psn = qp->r_psn;
-	goto send_ack;
-
-nack_acc_unlck:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
-	ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
-	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
-	qp->r_ack_psn = qp->r_psn;
-send_ack:
-	send_rc_ack(qp);
-	goto done;
-
-unlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-	return;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_registers.h b/drivers/staging/rdma/ipath/ipath_registers.h
deleted file mode 100644
index 8f44d0c..0000000
--- a/drivers/staging/rdma/ipath/ipath_registers.h
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_REGISTERS_H
-#define _IPATH_REGISTERS_H
-
-/*
- * This file should only be included by kernel source, and by the diags.  It
- * defines the registers, and their contents, for InfiniPath chips.
- */
-
-/*
- * These are the InfiniPath register and buffer bit definitions
- * that are visible to software and needed only by the kernel
- * and diag code.  A few that are visible to protocol and user
- * code are in ipath_common.h.  Some bits are specific
- * to a given chip implementation, and have been moved to the
- * chip-specific source file
- */
-
-/* kr_revision bits */
-#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
-#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8
-#define INFINIPATH_R_ARCH_MASK 0xFF
-#define INFINIPATH_R_ARCH_SHIFT 16
-#define INFINIPATH_R_SOFTWARE_MASK 0xFF
-#define INFINIPATH_R_SOFTWARE_SHIFT 24
-#define INFINIPATH_R_BOARDID_MASK 0xFF
-#define INFINIPATH_R_BOARDID_SHIFT 32
-
-/* kr_control bits */
-#define INFINIPATH_C_FREEZEMODE 0x00000002
-#define INFINIPATH_C_LINKENABLE 0x00000004
-
-/* kr_sendctrl bits */
-#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
-#define INFINIPATH_S_UPDTHRESH_SHIFT 24
-#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
-
-#define IPATH_S_ABORT		0
-#define IPATH_S_PIOINTBUFAVAIL	1
-#define IPATH_S_PIOBUFAVAILUPD	2
-#define IPATH_S_PIOENABLE	3
-#define IPATH_S_SDMAINTENABLE	9
-#define IPATH_S_SDMASINGLEDESCRIPTOR	10
-#define IPATH_S_SDMAENABLE	11
-#define IPATH_S_SDMAHALT	12
-#define IPATH_S_DISARM		31
-
-#define INFINIPATH_S_ABORT		(1U << IPATH_S_ABORT)
-#define INFINIPATH_S_PIOINTBUFAVAIL	(1U << IPATH_S_PIOINTBUFAVAIL)
-#define INFINIPATH_S_PIOBUFAVAILUPD	(1U << IPATH_S_PIOBUFAVAILUPD)
-#define INFINIPATH_S_PIOENABLE		(1U << IPATH_S_PIOENABLE)
-#define INFINIPATH_S_SDMAINTENABLE	(1U << IPATH_S_SDMAINTENABLE)
-#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
-					(1U << IPATH_S_SDMASINGLEDESCRIPTOR)
-#define INFINIPATH_S_SDMAENABLE		(1U << IPATH_S_SDMAENABLE)
-#define INFINIPATH_S_SDMAHALT		(1U << IPATH_S_SDMAHALT)
-#define INFINIPATH_S_DISARM		(1U << IPATH_S_DISARM)
-
-/* kr_rcvctrl bits that are the same on multiple chips */
-#define INFINIPATH_R_PORTENABLE_SHIFT 0
-#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_SDMAINT		0x8000000000000000ULL
-#define INFINIPATH_I_SDMADISABLED	0x4000000000000000ULL
-#define INFINIPATH_I_ERROR		0x0000000080000000ULL
-#define INFINIPATH_I_SPIOSENT		0x0000000040000000ULL
-#define INFINIPATH_I_SPIOBUFAVAIL	0x0000000020000000ULL
-#define INFINIPATH_I_GPIO		0x0000000010000000ULL
-#define INFINIPATH_I_JINT		0x0000000004000000ULL
-
-/* kr_errorstatus, kr_errorclear, kr_errormask bits */
-#define INFINIPATH_E_RFORMATERR			0x0000000000000001ULL
-#define INFINIPATH_E_RVCRC			0x0000000000000002ULL
-#define INFINIPATH_E_RICRC			0x0000000000000004ULL
-#define INFINIPATH_E_RMINPKTLEN			0x0000000000000008ULL
-#define INFINIPATH_E_RMAXPKTLEN			0x0000000000000010ULL
-#define INFINIPATH_E_RLONGPKTLEN		0x0000000000000020ULL
-#define INFINIPATH_E_RSHORTPKTLEN		0x0000000000000040ULL
-#define INFINIPATH_E_RUNEXPCHAR			0x0000000000000080ULL
-#define INFINIPATH_E_RUNSUPVL			0x0000000000000100ULL
-#define INFINIPATH_E_REBP			0x0000000000000200ULL
-#define INFINIPATH_E_RIBFLOW			0x0000000000000400ULL
-#define INFINIPATH_E_RBADVERSION		0x0000000000000800ULL
-#define INFINIPATH_E_RRCVEGRFULL		0x0000000000001000ULL
-#define INFINIPATH_E_RRCVHDRFULL		0x0000000000002000ULL
-#define INFINIPATH_E_RBADTID			0x0000000000004000ULL
-#define INFINIPATH_E_RHDRLEN			0x0000000000008000ULL
-#define INFINIPATH_E_RHDR			0x0000000000010000ULL
-#define INFINIPATH_E_RIBLOSTLINK		0x0000000000020000ULL
-#define INFINIPATH_E_SENDSPECIALTRIGGER		0x0000000008000000ULL
-#define INFINIPATH_E_SDMADISABLED		0x0000000010000000ULL
-#define INFINIPATH_E_SMINPKTLEN			0x0000000020000000ULL
-#define INFINIPATH_E_SMAXPKTLEN			0x0000000040000000ULL
-#define INFINIPATH_E_SUNDERRUN			0x0000000080000000ULL
-#define INFINIPATH_E_SPKTLEN			0x0000000100000000ULL
-#define INFINIPATH_E_SDROPPEDSMPPKT		0x0000000200000000ULL
-#define INFINIPATH_E_SDROPPEDDATAPKT		0x0000000400000000ULL
-#define INFINIPATH_E_SPIOARMLAUNCH		0x0000000800000000ULL
-#define INFINIPATH_E_SUNEXPERRPKTNUM		0x0000001000000000ULL
-#define INFINIPATH_E_SUNSUPVL			0x0000002000000000ULL
-#define INFINIPATH_E_SENDBUFMISUSE		0x0000004000000000ULL
-#define INFINIPATH_E_SDMAGENMISMATCH		0x0000008000000000ULL
-#define INFINIPATH_E_SDMAOUTOFBOUND		0x0000010000000000ULL
-#define INFINIPATH_E_SDMATAILOUTOFBOUND		0x0000020000000000ULL
-#define INFINIPATH_E_SDMABASE			0x0000040000000000ULL
-#define INFINIPATH_E_SDMA1STDESC		0x0000080000000000ULL
-#define INFINIPATH_E_SDMARPYTAG			0x0000100000000000ULL
-#define INFINIPATH_E_SDMADWEN			0x0000200000000000ULL
-#define INFINIPATH_E_SDMAMISSINGDW		0x0000400000000000ULL
-#define INFINIPATH_E_SDMAUNEXPDATA		0x0000800000000000ULL
-#define INFINIPATH_E_IBSTATUSCHANGED		0x0001000000000000ULL
-#define INFINIPATH_E_INVALIDADDR		0x0002000000000000ULL
-#define INFINIPATH_E_RESET			0x0004000000000000ULL
-#define INFINIPATH_E_HARDWARE			0x0008000000000000ULL
-#define INFINIPATH_E_SDMADESCADDRMISALIGN	0x0010000000000000ULL
-#define INFINIPATH_E_INVALIDEEPCMD		0x0020000000000000ULL
-
-/*
- * this is used to print "common" packet errors only when the
- * __IPATH_ERRPKTDBG bit is set in ipath_debug.
- */
-#define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \
-		| INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \
-		| INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
-		| INFINIPATH_E_REBP )
-
-/* Convenience for decoding Send DMA errors */
-#define INFINIPATH_E_SDMAERRS ( \
-	INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
-	INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
-	INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
-	INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
-	INFINIPATH_E_SDMAUNEXPDATA | \
-	INFINIPATH_E_SDMADESCADDRMISALIGN | \
-	INFINIPATH_E_SDMADISABLED | \
-	INFINIPATH_E_SENDBUFMISUSE)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
- * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2:  expTID, 3: eagerTID
- * 		bit 4: flag buffer, 5: datainfo, 6: header info */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-/* txe mem parity errors (shift by INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF	0x1ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC	0x2ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOLAUNCHFIFO 0x4ULL
-/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF   0x01ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ  0x02ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID   0x04ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF  0x10ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO  0x40ULL
-/* waldo specific -- find the rest in ipath_6110.c */
-#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR  0x0000000400000000ULL
-/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
-#define INFINIPATH_HWE_MEMBISTFAILED	0x0040000000000000ULL
-
-/* kr_hwdiagctrl bits */
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_DC_FORCERXDSYNCMEMPARITYERR  0x0000000400000000ULL
-#define INFINIPATH_DC_COUNTERDISABLE            0x1000000000000000ULL
-#define INFINIPATH_DC_COUNTERWREN               0x2000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-
-/* kr_ibcctrl bits */
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT 0
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
-#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
-#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
-#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKCMD_DOWN 1		/* move to 0x11 */
-#define INFINIPATH_IBCC_LINKCMD_ARMED 2		/* move to 0x21 */
-#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3	/* move to 0x31 */
-#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
-#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
-#define INFINIPATH_IBCC_MAXPKTLEN_SHIFT 20
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT 32
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT 36
-#define INFINIPATH_IBCC_CREDITSCALE_MASK 0x7ULL
-#define INFINIPATH_IBCC_CREDITSCALE_SHIFT 40
-#define INFINIPATH_IBCC_LOOPBACK             0x8000000000000000ULL
-#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
-
-/* kr_ibcstatus bits */
-#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
-#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
-
-#define INFINIPATH_IBCS_TXREADY       0x40000000
-#define INFINIPATH_IBCS_TXCREDITOK    0x80000000
-/* link training states (shift by
-   INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
-#define INFINIPATH_IBCS_LT_STATE_DISABLED	0x00
-#define INFINIPATH_IBCS_LT_STATE_LINKUP		0x01
-#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE	0x02
-#define INFINIPATH_IBCS_LT_STATE_POLLQUIET	0x03
-#define INFINIPATH_IBCS_LT_STATE_SLEEPDELAY	0x04
-#define INFINIPATH_IBCS_LT_STATE_SLEEPQUIET	0x05
-#define INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE	0x08
-#define INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG	0x09
-#define INFINIPATH_IBCS_LT_STATE_CFGWAITRMT	0x0a
-#define INFINIPATH_IBCS_LT_STATE_CFGIDLE	0x0b
-#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN	0x0c
-#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT	0x0e
-#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE	0x0f
-/* link state machine states (shift by ibcs_ls_shift) */
-#define INFINIPATH_IBCS_L_STATE_DOWN		0x0
-#define INFINIPATH_IBCS_L_STATE_INIT		0x1
-#define INFINIPATH_IBCS_L_STATE_ARM		0x2
-#define INFINIPATH_IBCS_L_STATE_ACTIVE		0x3
-#define INFINIPATH_IBCS_L_STATE_ACT_DEFER	0x4
-
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
-#define INFINIPATH_EXTS_GPIOIN_MASK 0xFFFFULL
-#define INFINIPATH_EXTS_GPIOIN_SHIFT 48
-
-/* kr_extctrl bits */
-#define INFINIPATH_EXTC_GPIOINVERT_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOINVERT_SHIFT 32
-#define INFINIPATH_EXTC_GPIOOE_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOOE_SHIFT 48
-#define INFINIPATH_EXTC_SERDESENABLE         0x80000000ULL
-#define INFINIPATH_EXTC_SERDESCONNECT        0x40000000ULL
-#define INFINIPATH_EXTC_SERDESENTRUNKING     0x20000000ULL
-#define INFINIPATH_EXTC_SERDESDISRXFIFO      0x10000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK1       0x08000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK2       0x04000000ULL
-#define INFINIPATH_EXTC_SERDESENENCDEC       0x02000000ULL
-#define INFINIPATH_EXTC_LED1SECPORT_ON       0x00000020ULL
-#define INFINIPATH_EXTC_LED2SECPORT_ON       0x00000010ULL
-#define INFINIPATH_EXTC_LED1PRIPORT_ON       0x00000008ULL
-#define INFINIPATH_EXTC_LED2PRIPORT_ON       0x00000004ULL
-#define INFINIPATH_EXTC_LEDGBLOK_ON          0x00000002ULL
-#define INFINIPATH_EXTC_LEDGBLERR_OFF        0x00000001ULL
-
-/* kr_partitionkey bits */
-#define INFINIPATH_PKEY_SIZE 16
-#define INFINIPATH_PKEY_MASK 0xFFFF
-#define INFINIPATH_PKEY_DEFAULT_PKEY 0xFFFF
-
-/* kr_serdesconfig0 bits */
-#define INFINIPATH_SERDC0_RESET_MASK  0xfULL	/* overall reset bits */
-#define INFINIPATH_SERDC0_RESET_PLL   0x10000000ULL	/* pll reset */
-/* tx idle enables (per lane) */
-#define INFINIPATH_SERDC0_TXIDLE      0xF000ULL
-/* rx detect enables (per lane) */
-#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
-/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
-#define INFINIPATH_SERDC0_L1PWR_DN	 0xF0ULL
-
-/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
-#define INFINIPATH_XGXS_RX_POL_SHIFT 19
-#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
-
-
-/*
- * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
- * PIO send buffers.  This is well beyond anything currently
- * defined in the InfiniBand spec.
- */
-#define IPATH_PIO_MAXIBHDR 128
-
-typedef u64 ipath_err_t;
-
-/* The following change with the type of device, so
- * need to be part of the ipath_devdata struct, or
- * we could have problems plugging in devices of
- * different types (e.g. one HT, one PCIE)
- * in one system, to be managed by one driver.
- * On the other hand, this file may also be included
- * by other code, so leave the declarations here
- * temporarily. Minor footprint issue if common-model
- * linker used, none if C89+ linker used.
- */
-
-/* mask of defined bits for various registers */
-extern u64 infinipath_i_bitsextant;
-extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
-
-/* masks that are different in various chips, or only exist in some chips */
-extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
-
-/*
- * These are the infinipath general register numbers (not offsets).
- * The kernel registers are used directly, those beyond the kernel
- * registers are calculated from one of the base registers.  The use of
- * an integer type doesn't allow type-checking as thorough as, say,
- * an enum but allows for better hiding of chip differences.
- */
-typedef const u16 ipath_kreg,	/* infinipath general registers */
- ipath_creg,			/* infinipath counter registers */
- ipath_sreg;			/* kernel-only, infinipath send registers */
-
-/*
- * These are the chip registers common to all infinipath chips, and
- * used both by the kernel and the diagnostics or other user code.
- * They are all implemented such that 64 bit accesses work.
- * Some implement no more than 32 bits.  Because 64 bit reads
- * require 2 HT cmds on opteron, we access those with 32 bit
- * reads for efficiency (they are written as 64 bits, since
- * the extra 32 bits are nearly free on writes, and it slightly reduces
- * complexity).  The rest are all accessed as 64 bits.
- */
-struct ipath_kregs {
-	/* These are the 32 bit group */
-	ipath_kreg kr_control;
-	ipath_kreg kr_counterregbase;
-	ipath_kreg kr_intmask;
-	ipath_kreg kr_intstatus;
-	ipath_kreg kr_pagealign;
-	ipath_kreg kr_portcnt;
-	ipath_kreg kr_rcvtidbase;
-	ipath_kreg kr_rcvtidcnt;
-	ipath_kreg kr_rcvegrbase;
-	ipath_kreg kr_rcvegrcnt;
-	ipath_kreg kr_scratch;
-	ipath_kreg kr_sendctrl;
-	ipath_kreg kr_sendpiobufbase;
-	ipath_kreg kr_sendpiobufcnt;
-	ipath_kreg kr_sendpiosize;
-	ipath_kreg kr_sendregbase;
-	ipath_kreg kr_userregbase;
-	/* These are the 64 bit group */
-	ipath_kreg kr_debugport;
-	ipath_kreg kr_debugportselect;
-	ipath_kreg kr_errorclear;
-	ipath_kreg kr_errormask;
-	ipath_kreg kr_errorstatus;
-	ipath_kreg kr_extctrl;
-	ipath_kreg kr_extstatus;
-	ipath_kreg kr_gpio_clear;
-	ipath_kreg kr_gpio_mask;
-	ipath_kreg kr_gpio_out;
-	ipath_kreg kr_gpio_status;
-	ipath_kreg kr_hwdiagctrl;
-	ipath_kreg kr_hwerrclear;
-	ipath_kreg kr_hwerrmask;
-	ipath_kreg kr_hwerrstatus;
-	ipath_kreg kr_ibcctrl;
-	ipath_kreg kr_ibcstatus;
-	ipath_kreg kr_intblocked;
-	ipath_kreg kr_intclear;
-	ipath_kreg kr_interruptconfig;
-	ipath_kreg kr_mdio;
-	ipath_kreg kr_partitionkey;
-	ipath_kreg kr_rcvbthqp;
-	ipath_kreg kr_rcvbufbase;
-	ipath_kreg kr_rcvbufsize;
-	ipath_kreg kr_rcvctrl;
-	ipath_kreg kr_rcvhdrcnt;
-	ipath_kreg kr_rcvhdrentsize;
-	ipath_kreg kr_rcvhdrsize;
-	ipath_kreg kr_rcvintmembase;
-	ipath_kreg kr_rcvintmemsize;
-	ipath_kreg kr_revision;
-	ipath_kreg kr_sendbuffererror;
-	ipath_kreg kr_sendpioavailaddr;
-	ipath_kreg kr_serdesconfig0;
-	ipath_kreg kr_serdesconfig1;
-	ipath_kreg kr_serdesstatus;
-	ipath_kreg kr_txintmembase;
-	ipath_kreg kr_txintmemsize;
-	ipath_kreg kr_xgxsconfig;
-	ipath_kreg kr_ibpllcfg;
-	/* use these two (and the following N ports) only with
-	 * ipath_k*_kreg64_port(); not *kreg64() */
-	ipath_kreg kr_rcvhdraddr;
-	ipath_kreg kr_rcvhdrtailaddr;
-
-	/* remaining registers are not present on all types of infinipath
-	   chips  */
-	ipath_kreg kr_rcvpktledcnt;
-	ipath_kreg kr_pcierbuftestreg0;
-	ipath_kreg kr_pcierbuftestreg1;
-	ipath_kreg kr_pcieq0serdesconfig0;
-	ipath_kreg kr_pcieq0serdesconfig1;
-	ipath_kreg kr_pcieq0serdesstatus;
-	ipath_kreg kr_pcieq1serdesconfig0;
-	ipath_kreg kr_pcieq1serdesconfig1;
-	ipath_kreg kr_pcieq1serdesstatus;
-	ipath_kreg kr_hrtbt_guid;
-	ipath_kreg kr_ibcddrctrl;
-	ipath_kreg kr_ibcddrstatus;
-	ipath_kreg kr_jintreload;
-
-	/* send dma related regs */
-	ipath_kreg kr_senddmabase;
-	ipath_kreg kr_senddmalengen;
-	ipath_kreg kr_senddmatail;
-	ipath_kreg kr_senddmahead;
-	ipath_kreg kr_senddmaheadaddr;
-	ipath_kreg kr_senddmabufmask0;
-	ipath_kreg kr_senddmabufmask1;
-	ipath_kreg kr_senddmabufmask2;
-	ipath_kreg kr_senddmastatus;
-
-	/* SerDes related regs (IBA7220-only) */
-	ipath_kreg kr_ibserdesctrl;
-	ipath_kreg kr_ib_epbacc;
-	ipath_kreg kr_ib_epbtrans;
-	ipath_kreg kr_pcie_epbacc;
-	ipath_kreg kr_pcie_epbtrans;
-	ipath_kreg kr_ib_ddsrxeq;
-};
-
-struct ipath_cregs {
-	ipath_creg cr_badformatcnt;
-	ipath_creg cr_erricrccnt;
-	ipath_creg cr_errlinkcnt;
-	ipath_creg cr_errlpcrccnt;
-	ipath_creg cr_errpkey;
-	ipath_creg cr_errrcvflowctrlcnt;
-	ipath_creg cr_err_rlencnt;
-	ipath_creg cr_errslencnt;
-	ipath_creg cr_errtidfull;
-	ipath_creg cr_errtidvalid;
-	ipath_creg cr_errvcrccnt;
-	ipath_creg cr_ibstatuschange;
-	ipath_creg cr_intcnt;
-	ipath_creg cr_invalidrlencnt;
-	ipath_creg cr_invalidslencnt;
-	ipath_creg cr_lbflowstallcnt;
-	ipath_creg cr_iblinkdowncnt;
-	ipath_creg cr_iblinkerrrecovcnt;
-	ipath_creg cr_ibsymbolerrcnt;
-	ipath_creg cr_pktrcvcnt;
-	ipath_creg cr_pktrcvflowctrlcnt;
-	ipath_creg cr_pktsendcnt;
-	ipath_creg cr_pktsendflowcnt;
-	ipath_creg cr_portovflcnt;
-	ipath_creg cr_rcvebpcnt;
-	ipath_creg cr_rcvovflcnt;
-	ipath_creg cr_rxdroppktcnt;
-	ipath_creg cr_senddropped;
-	ipath_creg cr_sendstallcnt;
-	ipath_creg cr_sendunderruncnt;
-	ipath_creg cr_unsupvlcnt;
-	ipath_creg cr_wordrcvcnt;
-	ipath_creg cr_wordsendcnt;
-	ipath_creg cr_vl15droppedpktcnt;
-	ipath_creg cr_rxotherlocalphyerrcnt;
-	ipath_creg cr_excessbufferovflcnt;
-	ipath_creg cr_locallinkintegrityerrcnt;
-	ipath_creg cr_rxvlerrcnt;
-	ipath_creg cr_rxdlidfltrcnt;
-	ipath_creg cr_psstat;
-	ipath_creg cr_psstart;
-	ipath_creg cr_psinterval;
-	ipath_creg cr_psrcvdatacount;
-	ipath_creg cr_psrcvpktscount;
-	ipath_creg cr_psxmitdatacount;
-	ipath_creg cr_psxmitpktscount;
-	ipath_creg cr_psxmitwaitcount;
-};
-
-#endif				/* _IPATH_REGISTERS_H */
diff --git a/drivers/staging/rdma/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c
deleted file mode 100644
index e541a01..0000000
--- a/drivers/staging/rdma/ipath/ipath_ruc.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/*
- * Convert the AETH RNR timeout code into the number of milliseconds.
- */
-const u32 ib_ipath_rnr_table[32] = {
-	656,			/* 0 */
-	1,			/* 1 */
-	1,			/* 2 */
-	1,			/* 3 */
-	1,			/* 4 */
-	1,			/* 5 */
-	1,			/* 6 */
-	1,			/* 7 */
-	1,			/* 8 */
-	1,			/* 9 */
-	1,			/* A */
-	1,			/* B */
-	1,			/* C */
-	1,			/* D */
-	2,			/* E */
-	2,			/* F */
-	3,			/* 10 */
-	4,			/* 11 */
-	6,			/* 12 */
-	8,			/* 13 */
-	11,			/* 14 */
-	16,			/* 15 */
-	21,			/* 16 */
-	31,			/* 17 */
-	41,			/* 18 */
-	62,			/* 19 */
-	82,			/* 1A */
-	123,			/* 1B */
-	164,			/* 1C */
-	246,			/* 1D */
-	328,			/* 1E */
-	492			/* 1F */
-};
-
-/**
- * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
- * @qp: the QP
- *
- * Called with the QP s_lock held and interrupts disabled.
- * XXX Use a simple list for now.  We might need a priority
- * queue if we have lots of QPs waiting for RNR timeouts
- * but that should be rare.
- */
-void ipath_insert_rnr_queue(struct ipath_qp *qp)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
-	/* We already did a spin_lock_irqsave(), so just use spin_lock */
-	spin_lock(&dev->pending_lock);
-	if (list_empty(&dev->rnrwait))
-		list_add(&qp->timerwait, &dev->rnrwait);
-	else {
-		struct list_head *l = &dev->rnrwait;
-		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
-						  timerwait);
-
-		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
-			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
-			l = l->next;
-			if (l->next == &dev->rnrwait) {
-				nqp = NULL;
-				break;
-			}
-			nqp = list_entry(l->next, struct ipath_qp,
-					 timerwait);
-		}
-		if (nqp)
-			nqp->s_rnr_timeout -= qp->s_rnr_timeout;
-		list_add(&qp->timerwait, l);
-	}
-	spin_unlock(&dev->pending_lock);
-}
-
-/**
- * ipath_init_sge - Validate a RWQE and fill in the SGE state
- * @qp: the QP
- *
- * Return 1 if OK.
- */
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
-		   u32 *lengthp, struct ipath_sge_state *ss)
-{
-	int i, j, ret;
-	struct ib_wc wc;
-
-	*lengthp = 0;
-	for (i = j = 0; i < wqe->num_sge; i++) {
-		if (wqe->sg_list[i].length == 0)
-			continue;
-		/* Check LKEY */
-		if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
-				   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
-			goto bad_lkey;
-		*lengthp += wqe->sg_list[i].length;
-		j++;
-	}
-	ss->num_sge = j;
-	ret = 1;
-	goto bail;
-
-bad_lkey:
-	memset(&wc, 0, sizeof(wc));
-	wc.wr_id = wqe->wr_id;
-	wc.status = IB_WC_LOC_PROT_ERR;
-	wc.opcode = IB_WC_RECV;
-	wc.qp = &qp->ibqp;
-	/* Signal solicited completion event. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-	ret = 0;
-bail:
-	return ret;
-}
-
-/**
- * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return 0 if no RWQE is available, otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
-{
-	unsigned long flags;
-	struct ipath_rq *rq;
-	struct ipath_rwq *wq;
-	struct ipath_srq *srq;
-	struct ipath_rwqe *wqe;
-	void (*handler)(struct ib_event *, void *);
-	u32 tail;
-	int ret;
-
-	if (qp->ibqp.srq) {
-		srq = to_isrq(qp->ibqp.srq);
-		handler = srq->ibsrq.event_handler;
-		rq = &srq->rq;
-	} else {
-		srq = NULL;
-		handler = NULL;
-		rq = &qp->r_rq;
-	}
-
-	spin_lock_irqsave(&rq->lock, flags);
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-		ret = 0;
-		goto unlock;
-	}
-
-	wq = rq->wq;
-	tail = wq->tail;
-	/* Validate tail before using it since it is user writable. */
-	if (tail >= rq->size)
-		tail = 0;
-	do {
-		if (unlikely(tail == wq->head)) {
-			ret = 0;
-			goto unlock;
-		}
-		/* Make sure entry is read after head index is read. */
-		smp_rmb();
-		wqe = get_rwqe_ptr(rq, tail);
-		if (++tail >= rq->size)
-			tail = 0;
-		if (wr_id_only)
-			break;
-		qp->r_sge.sg_list = qp->r_sg_list;
-	} while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
-	qp->r_wr_id = wqe->wr_id;
-	wq->tail = tail;
-
-	ret = 1;
-	set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
-	if (handler) {
-		u32 n;
-
-		/*
-		 * validate head pointer value and compute
-		 * the number of remaining WQEs.
-		 */
-		n = wq->head;
-		if (n >= rq->size)
-			n = 0;
-		if (n < tail)
-			n += rq->size - tail;
-		else
-			n -= tail;
-		if (n < srq->limit) {
-			struct ib_event ev;
-
-			srq->limit = 0;
-			spin_unlock_irqrestore(&rq->lock, flags);
-			ev.device = qp->ibqp.device;
-			ev.element.srq = qp->ibqp.srq;
-			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-			handler(&ev, srq->ibsrq.srq_context);
-			goto bail;
-		}
-	}
-unlock:
-	spin_unlock_irqrestore(&rq->lock, flags);
-bail:
-	return ret;
-}
-
-/**
- * ipath_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from ipath_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send().  We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ipath_ruc_loopback(struct ipath_qp *sqp)
-{
-	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
-	struct ipath_qp *qp;
-	struct ipath_swqe *wqe;
-	struct ipath_sge *sge;
-	unsigned long flags;
-	struct ib_wc wc;
-	u64 sdata;
-	atomic64_t *maddr;
-	enum ib_wc_status send_status;
-
-	/*
-	 * Note that we check the responder QP state after
-	 * checking the requester's state.
-	 */
-	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
-
-	spin_lock_irqsave(&sqp->s_lock, flags);
-
-	/* Return if we are already busy processing a work request. */
-	if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
-	    !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
-		goto unlock;
-
-	sqp->s_flags |= IPATH_S_BUSY;
-
-again:
-	if (sqp->s_last == sqp->s_head)
-		goto clr_busy;
-	wqe = get_swqe_ptr(sqp, sqp->s_last);
-
-	/* Return if it is not OK to start a new work request. */
-	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
-		if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
-			goto clr_busy;
-		/* We are in the error state, flush the work request. */
-		send_status = IB_WC_WR_FLUSH_ERR;
-		goto flush_send;
-	}
-
-	/*
-	 * We can rely on the entry not changing without the s_lock
-	 * being held until we update s_last.
-	 * We increment s_cur to indicate s_last is in progress.
-	 */
-	if (sqp->s_last == sqp->s_cur) {
-		if (++sqp->s_cur >= sqp->s_size)
-			sqp->s_cur = 0;
-	}
-	spin_unlock_irqrestore(&sqp->s_lock, flags);
-
-	if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-		dev->n_pkt_drops++;
-		/*
-		 * For RC, the requester would timeout and retry so
-		 * shortcut the timeouts and just signal too many retries.
-		 */
-		if (sqp->ibqp.qp_type == IB_QPT_RC)
-			send_status = IB_WC_RETRY_EXC_ERR;
-		else
-			send_status = IB_WC_SUCCESS;
-		goto serr;
-	}
-
-	memset(&wc, 0, sizeof wc);
-	send_status = IB_WC_SUCCESS;
-
-	sqp->s_sge.sge = wqe->sg_list[0];
-	sqp->s_sge.sg_list = wqe->sg_list + 1;
-	sqp->s_sge.num_sge = wqe->wr.num_sge;
-	sqp->s_len = wqe->length;
-	switch (wqe->wr.opcode) {
-	case IB_WR_SEND_WITH_IMM:
-		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.ex.imm_data = wqe->wr.ex.imm_data;
-		/* FALLTHROUGH */
-	case IB_WR_SEND:
-		if (!ipath_get_rwqe(qp, 0))
-			goto rnr_nak;
-		break;
-
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-			goto inv_err;
-		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.ex.imm_data = wqe->wr.ex.imm_data;
-		if (!ipath_get_rwqe(qp, 1))
-			goto rnr_nak;
-		/* FALLTHROUGH */
-	case IB_WR_RDMA_WRITE:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-			goto inv_err;
-		if (wqe->length == 0)
-			break;
-		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
-					    wqe->rdma_wr.remote_addr,
-					    wqe->rdma_wr.rkey,
-					    IB_ACCESS_REMOTE_WRITE)))
-			goto acc_err;
-		break;
-
-	case IB_WR_RDMA_READ:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
-			goto inv_err;
-		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
-					    wqe->rdma_wr.remote_addr,
-					    wqe->rdma_wr.rkey,
-					    IB_ACCESS_REMOTE_READ)))
-			goto acc_err;
-		qp->r_sge.sge = wqe->sg_list[0];
-		qp->r_sge.sg_list = wqe->sg_list + 1;
-		qp->r_sge.num_sge = wqe->wr.num_sge;
-		break;
-
-	case IB_WR_ATOMIC_CMP_AND_SWP:
-	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
-			goto inv_err;
-		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
-					    wqe->atomic_wr.remote_addr,
-					    wqe->atomic_wr.rkey,
-					    IB_ACCESS_REMOTE_ATOMIC)))
-			goto acc_err;
-		/* Perform atomic OP and save result. */
-		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-		sdata = wqe->atomic_wr.compare_add;
-		*(u64 *) sqp->s_sge.sge.vaddr =
-			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-			(u64) atomic64_add_return(sdata, maddr) - sdata :
-			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-				      sdata, wqe->atomic_wr.swap);
-		goto send_comp;
-
-	default:
-		send_status = IB_WC_LOC_QP_OP_ERR;
-		goto serr;
-	}
-
-	sge = &sqp->s_sge.sge;
-	while (sqp->s_len) {
-		u32 len = sqp->s_len;
-
-		if (len > sge->length)
-			len = sge->length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (--sqp->s_sge.num_sge)
-				*sge = *sqp->s_sge.sg_list++;
-		} else if (sge->length == 0 && sge->mr != NULL) {
-			if (++sge->n >= IPATH_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		sqp->s_len -= len;
-	}
-
-	if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-		goto send_comp;
-
-	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-	else
-		wc.opcode = IB_WC_RECV;
-	wc.wr_id = qp->r_wr_id;
-	wc.status = IB_WC_SUCCESS;
-	wc.byte_len = wqe->length;
-	wc.qp = &qp->ibqp;
-	wc.src_qp = qp->remote_qpn;
-	wc.slid = qp->remote_ah_attr.dlid;
-	wc.sl = qp->remote_ah_attr.sl;
-	wc.port_num = 1;
-	/* Signal completion event if the solicited bit is set. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-		       wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
-	spin_lock_irqsave(&sqp->s_lock, flags);
-flush_send:
-	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-	ipath_send_complete(sqp, wqe, send_status);
-	goto again;
-
-rnr_nak:
-	/* Handle RNR NAK */
-	if (qp->ibqp.qp_type == IB_QPT_UC)
-		goto send_comp;
-	/*
-	 * Note: we don't need the s_lock held since the BUSY flag
-	 * makes this single threaded.
-	 */
-	if (sqp->s_rnr_retry == 0) {
-		send_status = IB_WC_RNR_RETRY_EXC_ERR;
-		goto serr;
-	}
-	if (sqp->s_rnr_retry_cnt < 7)
-		sqp->s_rnr_retry--;
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
-		goto clr_busy;
-	sqp->s_flags |= IPATH_S_WAITING;
-	dev->n_rnr_naks++;
-	sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
-	ipath_insert_rnr_queue(sqp);
-	goto clr_busy;
-
-inv_err:
-	send_status = IB_WC_REM_INV_REQ_ERR;
-	wc.status = IB_WC_LOC_QP_OP_ERR;
-	goto err;
-
-acc_err:
-	send_status = IB_WC_REM_ACCESS_ERR;
-	wc.status = IB_WC_LOC_PROT_ERR;
-err:
-	/* responder goes to error state */
-	ipath_rc_error(qp, wc.status);
-
-serr:
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	ipath_send_complete(sqp, wqe, send_status);
-	if (sqp->ibqp.qp_type == IB_QPT_RC) {
-		int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
-		sqp->s_flags &= ~IPATH_S_BUSY;
-		spin_unlock_irqrestore(&sqp->s_lock, flags);
-		if (lastwqe) {
-			struct ib_event ev;
-
-			ev.device = sqp->ibqp.device;
-			ev.element.qp = &sqp->ibqp;
-			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
-		}
-		goto done;
-	}
-clr_busy:
-	sqp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-	spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
-	if (qp && atomic_dec_and_test(&qp->refcount))
-		wake_up(&qp->wait);
-}
-
-static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
-{
-	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
-	    qp->ibqp.qp_type == IB_QPT_SMI) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-		dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-				 dd->ipath_sendctrl);
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-	}
-}
-
-/**
- * ipath_no_bufs_available - tell the layer driver we need buffers
- * @qp: the QP that caused the problem
- * @dev: the device we ran out of buffers on
- *
- * Called when we run out of PIO buffers.
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int ipath_no_bufs_available(struct ipath_qp *qp,
-				    struct ipath_ibdev *dev)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	/*
-	 * Note that as soon as want_buffer() is called and
-	 * possibly before it returns, ipath_ib_piobufavail()
-	 * could be called. Therefore, put QP on the piowait list before
-	 * enabling the PIO avail interrupt.
-	 */
-	spin_lock_irqsave(&qp->s_lock, flags);
-	if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
-		dev->n_piowait++;
-		qp->s_flags |= IPATH_S_WAITING;
-		qp->s_flags &= ~IPATH_S_BUSY;
-		spin_lock(&dev->pending_lock);
-		if (list_empty(&qp->piowait))
-			list_add_tail(&qp->piowait, &dev->piowait);
-		spin_unlock(&dev->pending_lock);
-	} else
-		ret = 0;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	if (ret)
-		want_buffer(dev->dd, qp);
-	return ret;
-}
-
-/**
- * ipath_make_grh - construct a GRH header
- * @dev: a pointer to the ipath device
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
-		   struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
-	hdr->version_tclass_flow =
-		cpu_to_be32((6 << 28) |
-			    (grh->traffic_class << 20) |
-			    grh->flow_label);
-	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
-	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
-	hdr->next_hdr = 0x1B;
-	hdr->hop_limit = grh->hop_limit;
-	/* The SGID is 32-bit aligned. */
-	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
-	hdr->sgid.global.interface_id = dev->dd->ipath_guid;
-	hdr->dgid = grh->dgid;
-
-	/* GRH header size in 32-bit words. */
-	return sizeof(struct ib_grh) / sizeof(u32);
-}
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
-			   struct ipath_other_headers *ohdr,
-			   u32 bth0, u32 bth2)
-{
-	u16 lrh0;
-	u32 nwords;
-	u32 extra_bytes;
-
-	/* Construct the header. */
-	extra_bytes = -qp->s_cur_size & 3;
-	nwords = (qp->s_cur_size + extra_bytes) >> 2;
-	lrh0 = IPATH_LRH_BTH;
-	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
-						 &qp->remote_ah_attr.grh,
-						 qp->s_hdrwords, nwords);
-		lrh0 = IPATH_LRH_GRH;
-	}
-	lrh0 |= qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
-				       qp->remote_ah_attr.src_path_bits);
-	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
-	bth0 |= extra_bytes << 20;
-	ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
-	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(bth2);
-}
-
-/**
- * ipath_do_send - perform a send on a QP
- * @data: contains a pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void ipath_do_send(unsigned long data)
-{
-	struct ipath_qp *qp = (struct ipath_qp *)data;
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	int (*make_req)(struct ipath_qp *qp);
-	unsigned long flags;
-
-	if ((qp->ibqp.qp_type == IB_QPT_RC ||
-	     qp->ibqp.qp_type == IB_QPT_UC) &&
-	    qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
-		ipath_ruc_loopback(qp);
-		goto bail;
-	}
-
-	if (qp->ibqp.qp_type == IB_QPT_RC)
-	       make_req = ipath_make_rc_req;
-	else if (qp->ibqp.qp_type == IB_QPT_UC)
-	       make_req = ipath_make_uc_req;
-	else
-	       make_req = ipath_make_ud_req;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	/* Return if we are already busy processing a work request. */
-	if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
-	    !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-		goto bail;
-	}
-
-	qp->s_flags |= IPATH_S_BUSY;
-
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-again:
-	/* Check for a constructed packet to be sent. */
-	if (qp->s_hdrwords != 0) {
-		/*
-		 * If no PIO bufs are available, return.  An interrupt will
-		 * call ipath_ib_piobufavail() when one is available.
-		 */
-		if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
-				     qp->s_cur_sge, qp->s_cur_size)) {
-			if (ipath_no_bufs_available(qp, dev))
-				goto bail;
-		}
-		dev->n_unicast_xmit++;
-		/* Record that we sent the packet and s_hdr is empty. */
-		qp->s_hdrwords = 0;
-	}
-
-	if (make_req(qp))
-		goto again;
-
-bail:;
-}
-
-/*
- * This should be called with s_lock held.
- */
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
-			 enum ib_wc_status status)
-{
-	u32 old_last, last;
-
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
-		return;
-
-	/* See ch. 11.2.4.1 and 10.7.3.1 */
-	if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-	    status != IB_WC_SUCCESS) {
-		struct ib_wc wc;
-
-		memset(&wc, 0, sizeof wc);
-		wc.wr_id = wqe->wr.wr_id;
-		wc.status = status;
-		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		wc.qp = &qp->ibqp;
-		if (status == IB_WC_SUCCESS)
-			wc.byte_len = wqe->length;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
-			       status != IB_WC_SUCCESS);
-	}
-
-	old_last = last = qp->s_last;
-	if (++last >= qp->s_size)
-		last = 0;
-	qp->s_last = last;
-	if (qp->s_cur == old_last)
-		qp->s_cur = last;
-	if (qp->s_tail == old_last)
-		qp->s_tail = last;
-	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
-		qp->s_draining = 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_sdma.c b/drivers/staging/rdma/ipath/ipath_sdma.c
deleted file mode 100644
index 1ffc06a..0000000
--- a/drivers/staging/rdma/ipath/ipath_sdma.c
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
-
-static void vl15_watchdog_enq(struct ipath_devdata *dd)
-{
-	/* ipath_sdma_lock must already be held */
-	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
-		unsigned long interval = (HZ + 19) / 20;
-		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
-		add_timer(&dd->ipath_sdma_vl15_timer);
-	}
-}
-
-static void vl15_watchdog_deq(struct ipath_devdata *dd)
-{
-	/* ipath_sdma_lock must already be held */
-	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
-		unsigned long interval = (HZ + 19) / 20;
-		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
-	} else {
-		del_timer(&dd->ipath_sdma_vl15_timer);
-	}
-}
-
-static void vl15_watchdog_timeout(unsigned long opaque)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
-		ipath_dbg("vl15 watchdog timeout - clearing\n");
-		ipath_cancel_sends(dd, 1);
-		ipath_hol_down(dd);
-	} else {
-		ipath_dbg("vl15 watchdog timeout - "
-			  "condition already cleared\n");
-	}
-}
-
-static void unmap_desc(struct ipath_devdata *dd, unsigned head)
-{
-	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
-	u64 desc[2];
-	dma_addr_t addr;
-	size_t len;
-
-	desc[0] = le64_to_cpu(descqp[0]);
-	desc[1] = le64_to_cpu(descqp[1]);
-
-	addr = (desc[1] << 32) | (desc[0] >> 32);
-	len = (desc[0] >> 14) & (0x7ffULL << 2);
-	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
-}
-
-/*
- * ipath_sdma_lock should be locked before calling this.
- */
-int ipath_sdma_make_progress(struct ipath_devdata *dd)
-{
-	struct list_head *lp = NULL;
-	struct ipath_sdma_txreq *txp = NULL;
-	u16 dmahead;
-	u16 start_idx = 0;
-	int progress = 0;
-
-	if (!list_empty(&dd->ipath_sdma_activelist)) {
-		lp = dd->ipath_sdma_activelist.next;
-		txp = list_entry(lp, struct ipath_sdma_txreq, list);
-		start_idx = txp->start_idx;
-	}
-
-	/*
-	 * Read the SDMA head register in order to know that the
-	 * interrupt clear has been written to the chip.
-	 * Otherwise, we may not get an interrupt for the last
-	 * descriptor in the queue.
-	 */
-	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
-	/* sanity check return value for error handling (chip reset, etc.) */
-	if (dmahead >= dd->ipath_sdma_descq_cnt)
-		goto done;
-
-	while (dd->ipath_sdma_descq_head != dmahead) {
-		if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
-		    dd->ipath_sdma_descq_head == start_idx) {
-			unmap_desc(dd, dd->ipath_sdma_descq_head);
-			start_idx++;
-			if (start_idx == dd->ipath_sdma_descq_cnt)
-				start_idx = 0;
-		}
-
-		/* increment free count and head */
-		dd->ipath_sdma_descq_removed++;
-		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
-			dd->ipath_sdma_descq_head = 0;
-
-		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
-			/* move to notify list */
-			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-				vl15_watchdog_deq(dd);
-			list_move_tail(lp, &dd->ipath_sdma_notifylist);
-			if (!list_empty(&dd->ipath_sdma_activelist)) {
-				lp = dd->ipath_sdma_activelist.next;
-				txp = list_entry(lp, struct ipath_sdma_txreq,
-						 list);
-				start_idx = txp->start_idx;
-			} else {
-				lp = NULL;
-				txp = NULL;
-			}
-		}
-		progress = 1;
-	}
-
-	if (progress)
-		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
-done:
-	return progress;
-}
-
-static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
-{
-	struct ipath_sdma_txreq *txp, *txp_next;
-
-	list_for_each_entry_safe(txp, txp_next, list, list) {
-		list_del_init(&txp->list);
-
-		if (txp->callback)
-			(*txp->callback)(txp->callback_cookie,
-					 txp->callback_status);
-	}
-}
-
-static void sdma_notify_taskbody(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-	struct list_head list;
-
-	INIT_LIST_HEAD(&list);
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-	list_splice_init(&dd->ipath_sdma_notifylist, &list);
-
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-	ipath_sdma_notify(dd, &list);
-
-	/*
-	 * The IB verbs layer needs to see the callback before getting
-	 * the call to ipath_ib_piobufavail() because the callback
-	 * handles releasing resources the next send will need.
-	 * Otherwise, we could do these calls in
-	 * ipath_sdma_make_progress().
-	 */
-	ipath_ib_piobufavail(dd->verbs_dev);
-}
-
-static void sdma_notify_task(unsigned long opaque)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-		sdma_notify_taskbody(dd);
-}
-
-static void dump_sdma_state(struct ipath_devdata *dd)
-{
-	unsigned long reg;
-
-	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
-	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
-
-	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
-	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
-
-	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
-	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
-
-	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
-	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
-
-	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
-	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
-
-	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
-	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
-
-	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
-	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
-}
-
-static void sdma_abort_task(unsigned long opaque)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-	u64 status;
-	unsigned long flags;
-
-	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-		return;
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
-
-	/* nothing to do */
-	if (status == IPATH_SDMA_ABORT_NONE)
-		goto unlock;
-
-	/* ipath_sdma_abort() is done, waiting for interrupt */
-	if (status == IPATH_SDMA_ABORT_DISARMED) {
-		if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
-			goto resched_noprint;
-		/* give up, intr got lost somewhere */
-		ipath_dbg("give up waiting for SDMADISABLED intr\n");
-		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-		status = IPATH_SDMA_ABORT_ABORTED;
-	}
-
-	/* everything is stopped, time to clean up and restart */
-	if (status == IPATH_SDMA_ABORT_ABORTED) {
-		struct ipath_sdma_txreq *txp, *txpnext;
-		u64 hwstatus;
-		int notify = 0;
-
-		hwstatus = ipath_read_kreg64(dd,
-				dd->ipath_kregs->kr_senddmastatus);
-
-		if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
-				 IPATH_SDMA_STATUS_ABORT_IN_PROG	     |
-				 IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
-		    !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
-			if (dd->ipath_sdma_reset_wait > 0) {
-				/* not done shutting down sdma */
-				--dd->ipath_sdma_reset_wait;
-				goto resched;
-			}
-			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
-				"status after SDMA reset, continuing\n");
-			dump_sdma_state(dd);
-		}
-
-		/* dequeue all "sent" requests */
-		list_for_each_entry_safe(txp, txpnext,
-					 &dd->ipath_sdma_activelist, list) {
-			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
-			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-				vl15_watchdog_deq(dd);
-			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
-			notify = 1;
-		}
-		if (notify)
-			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
-		/* reset our notion of head and tail */
-		dd->ipath_sdma_descq_tail = 0;
-		dd->ipath_sdma_descq_head = 0;
-		dd->ipath_sdma_head_dma[0] = 0;
-		dd->ipath_sdma_generation = 0;
-		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
-
-		/* Reset SendDmaLenGen */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
-			(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
-
-		/* done with sdma state for a bit */
-		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-		/*
-		 * Don't restart sdma here (with the exception
-		 * below). Wait until link is up to ACTIVE.  VL15 MADs
-		 * used to bring the link up use PIO, and multiple link
-		 * transitions otherwise cause the sdma engine to be
-		 * stopped and started multiple times.
-		 * The disable is done here, including the shadow,
-		 * so the state is kept consistent.
-		 * See ipath_restart_sdma() for the actual starting
-		 * of sdma.
-		 */
-		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-				 dd->ipath_sendctrl);
-		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-		/* make sure I see next message */
-		dd->ipath_sdma_abort_jiffies = 0;
-
-		/*
-		 * Not everything that takes SDMA offline is a link
-		 * status change.  If the link was up, restart SDMA.
-		 */
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			ipath_restart_sdma(dd);
-
-		goto done;
-	}
-
-resched:
-	/*
-	 * for now, keep spinning
-	 * JAG - this is bad to just have default be a loop without
-	 * state change
-	 */
-	if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
-		ipath_dbg("looping with status 0x%08lx\n",
-			  dd->ipath_sdma_status);
-		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
-	}
-resched_noprint:
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-	return;
-
-unlock:
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-done:
-	return;
-}
-
-/*
- * This is called from interrupt context.
- */
-void ipath_sdma_intr(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-	(void) ipath_sdma_make_progress(dd);
-
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-}
-
-static int alloc_sdma(struct ipath_devdata *dd)
-{
-	int ret = 0;
-
-	/* Allocate memory for SendDMA descriptor FIFO */
-	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
-		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
-
-	if (!dd->ipath_sdma_descq) {
-		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
-			"FIFO memory\n");
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	dd->ipath_sdma_descq_cnt =
-		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
-
-	/* Allocate memory for DMA of head register to memory */
-	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
-		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
-	if (!dd->ipath_sdma_head_dma) {
-		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
-		ret = -ENOMEM;
-		goto cleanup_descq;
-	}
-	dd->ipath_sdma_head_dma[0] = 0;
-
-	setup_timer(&dd->ipath_sdma_vl15_timer, vl15_watchdog_timeout,
-			(unsigned long)dd);
-
-	atomic_set(&dd->ipath_sdma_vl15_count, 0);
-
-	goto done;
-
-cleanup_descq:
-	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
-		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
-	dd->ipath_sdma_descq = NULL;
-	dd->ipath_sdma_descq_phys = 0;
-done:
-	return ret;
-}
-
-int setup_sdma(struct ipath_devdata *dd)
-{
-	int ret = 0;
-	unsigned i, n;
-	u64 tmp64;
-	u64 senddmabufmask[3] = { 0 };
-	unsigned long flags;
-
-	ret = alloc_sdma(dd);
-	if (ret)
-		goto done;
-
-	if (!dd->ipath_sdma_descq) {
-		ipath_dev_err(dd, "SendDMA memory not allocated\n");
-		goto done;
-	}
-
-	/*
-	 * Set initial status as if we had been up, then gone down.
-	 * This lets initial start on transition to ACTIVE be the
-	 * same as restart after link flap.
-	 */
-	dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
-	dd->ipath_sdma_abort_jiffies = 0;
-	dd->ipath_sdma_generation = 0;
-	dd->ipath_sdma_descq_tail = 0;
-	dd->ipath_sdma_descq_head = 0;
-	dd->ipath_sdma_descq_removed = 0;
-	dd->ipath_sdma_descq_added = 0;
-
-	/* Set SendDmaBase */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
-			 dd->ipath_sdma_descq_phys);
-	/* Set SendDmaLenGen */
-	tmp64 = dd->ipath_sdma_descq_cnt;
-	tmp64 |= 1<<18; /* enable generation checking */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
-	/* Set SendDmaTail */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
-			 dd->ipath_sdma_descq_tail);
-	/* Set SendDmaHeadAddr */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
-			 dd->ipath_sdma_head_phys);
-
-	/*
-	 * Reserve all the former "kernel" piobufs, using high number range
-	 * so we get as many 4K buffers as possible
-	 */
-	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-	i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
-	ipath_chg_pioavailkernel(dd, i, n - i , 0);
-	for (; i < n; ++i) {
-		unsigned word = i / 64;
-		unsigned bit = i & 63;
-		BUG_ON(word >= 3);
-		senddmabufmask[word] |= 1ULL << bit;
-	}
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
-			 senddmabufmask[0]);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
-			 senddmabufmask[1]);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
-			 senddmabufmask[2]);
-
-	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
-	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
-
-	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
-		     (unsigned long) dd);
-	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
-		     (unsigned long) dd);
-
-	/*
-	 * No use to turn on SDMA here, as link is probably not ACTIVE
-	 * Just mark it RUNNING and enable the interrupt, and let the
-	 * ipath_restart_sdma() on link transition to ACTIVE actually
-	 * enable it.
-	 */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-done:
-	return ret;
-}
-
-void teardown_sdma(struct ipath_devdata *dd)
-{
-	struct ipath_sdma_txreq *txp, *txpnext;
-	unsigned long flags;
-	dma_addr_t sdma_head_phys = 0;
-	dma_addr_t sdma_descq_phys = 0;
-	void *sdma_descq = NULL;
-	void *sdma_head_dma = NULL;
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
-	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-	tasklet_kill(&dd->ipath_sdma_abort_task);
-	tasklet_kill(&dd->ipath_sdma_notify_task);
-
-	/* turn off sdma */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-	/* dequeue all "sent" requests */
-	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
-				 list) {
-		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
-		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-			vl15_watchdog_deq(dd);
-		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
-	}
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-	sdma_notify_taskbody(dd);
-
-	del_timer_sync(&dd->ipath_sdma_vl15_timer);
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-	dd->ipath_sdma_abort_jiffies = 0;
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
-
-	if (dd->ipath_sdma_head_dma) {
-		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
-		sdma_head_phys = dd->ipath_sdma_head_phys;
-		dd->ipath_sdma_head_dma = NULL;
-		dd->ipath_sdma_head_phys = 0;
-	}
-
-	if (dd->ipath_sdma_descq) {
-		sdma_descq = dd->ipath_sdma_descq;
-		sdma_descq_phys = dd->ipath_sdma_descq_phys;
-		dd->ipath_sdma_descq = NULL;
-		dd->ipath_sdma_descq_phys = 0;
-	}
-
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-	if (sdma_head_dma)
-		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-				  sdma_head_dma, sdma_head_phys);
-
-	if (sdma_descq)
-		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
-				  sdma_descq, sdma_descq_phys);
-}
-
-/*
- * [Re]start SDMA, if we use it, and it's not already OK.
- * This is called on transition to link ACTIVE, either the first or
- * subsequent times.
- */
-void ipath_restart_sdma(struct ipath_devdata *dd)
-{
-	unsigned long flags;
-	int needed = 1;
-
-	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
-		goto bail;
-
-	/*
-	 * First, make sure we should, which is to say,
-	 * check that we are "RUNNING" (not in teardown)
-	 * and not "SHUTDOWN"
-	 */
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
-		|| test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-			needed = 0;
-	else {
-		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-	}
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-	if (!needed) {
-		ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
-			dd->ipath_sdma_status);
-		goto bail;
-	}
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	/*
-	 * First clear, just to be safe. Enable is only done
-	 * in chip on 0->1 transition
-	 */
-	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-	/* notify upper layers */
-	ipath_ib_piobufavail(dd->verbs_dev);
-
-bail:
-	return;
-}
-
-static inline void make_sdma_desc(struct ipath_devdata *dd,
-	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
-{
-	WARN_ON(addr & 3);
-	/* SDmaPhyAddr[47:32] */
-	sdmadesc[1] = addr >> 32;
-	/* SDmaPhyAddr[31:0] */
-	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
-	/* SDmaGeneration[1:0] */
-	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
-	/* SDmaDwordCount[10:0] */
-	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
-	/* SDmaBufOffset[12:2] */
-	sdmadesc[0] |= dwoffset & 0x7ffULL;
-}
-
-/*
- * This function queues one IB packet onto the send DMA queue per call.
- * The caller is responsible for checking:
- * 1) The number of send DMA descriptor entries is less than the size of
- *    the descriptor queue.
- * 2) The IB SGE addresses and lengths are 32-bit aligned
- *    (except possibly the last SGE's length)
- * 3) The SGE addresses are suitable for passing to dma_map_single().
- */
-int ipath_sdma_verbs_send(struct ipath_devdata *dd,
-	struct ipath_sge_state *ss, u32 dwords,
-	struct ipath_verbs_txreq *tx)
-{
-
-	unsigned long flags;
-	struct ipath_sge *sge;
-	int ret = 0;
-	u16 tail;
-	__le64 *descqp;
-	u64 sdmadesc[2];
-	u32 dwoffset;
-	dma_addr_t addr;
-
-	if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
-		ipath_dbg("packet size %X > ibmax %X, fail\n",
-			tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
-		ret = -EMSGSIZE;
-		goto fail;
-	}
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-retry:
-	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
-		ret = -EBUSY;
-		goto unlock;
-	}
-
-	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
-		if (ipath_sdma_make_progress(dd))
-			goto retry;
-		ret = -ENOBUFS;
-		goto unlock;
-	}
-
-	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
-			      tx->map_len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&dd->pcidev->dev, addr))
-		goto ioerr;
-
-	dwoffset = tx->map_len >> 2;
-	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
-
-	/* SDmaFirstDesc */
-	sdmadesc[0] |= 1ULL << 12;
-	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
-		sdmadesc[0] |= 1ULL << 14;	/* SDmaUseLargeBuf */
-
-	/* write to the descq */
-	tail = dd->ipath_sdma_descq_tail;
-	descqp = &dd->ipath_sdma_descq[tail].qw[0];
-	*descqp++ = cpu_to_le64(sdmadesc[0]);
-	*descqp++ = cpu_to_le64(sdmadesc[1]);
-
-	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
-		tx->txreq.start_idx = tail;
-
-	/* increment the tail */
-	if (++tail == dd->ipath_sdma_descq_cnt) {
-		tail = 0;
-		descqp = &dd->ipath_sdma_descq[0].qw[0];
-		++dd->ipath_sdma_generation;
-	}
-
-	sge = &ss->sge;
-	while (dwords) {
-		u32 dw;
-		u32 len;
-
-		len = dwords << 2;
-		if (len > sge->length)
-			len = sge->length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		dw = (len + 3) >> 2;
-		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
-				      DMA_TO_DEVICE);
-		if (dma_mapping_error(&dd->pcidev->dev, addr))
-			goto unmap;
-		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
-		/* SDmaUseLargeBuf has to be set in every descriptor */
-		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
-			sdmadesc[0] |= 1ULL << 14;
-		/* write to the descq */
-		*descqp++ = cpu_to_le64(sdmadesc[0]);
-		*descqp++ = cpu_to_le64(sdmadesc[1]);
-
-		/* increment the tail */
-		if (++tail == dd->ipath_sdma_descq_cnt) {
-			tail = 0;
-			descqp = &dd->ipath_sdma_descq[0].qw[0];
-			++dd->ipath_sdma_generation;
-		}
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (--ss->num_sge)
-				*sge = *ss->sg_list++;
-		} else if (sge->length == 0 && sge->mr != NULL) {
-			if (++sge->n >= IPATH_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-
-		dwoffset += dw;
-		dwords -= dw;
-	}
-
-	if (!tail)
-		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
-	descqp -= 2;
-	/* SDmaLastDesc */
-	descqp[0] |= cpu_to_le64(1ULL << 11);
-	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
-		/* SDmaIntReq */
-		descqp[0] |= cpu_to_le64(1ULL << 15);
-	}
-
-	/* Commit writes to memory and advance the tail on the chip */
-	wmb();
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
-
-	tx->txreq.next_descq_idx = tail;
-	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
-	dd->ipath_sdma_descq_tail = tail;
-	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
-	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
-	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
-		vl15_watchdog_enq(dd);
-	goto unlock;
-
-unmap:
-	while (tail != dd->ipath_sdma_descq_tail) {
-		if (!tail)
-			tail = dd->ipath_sdma_descq_cnt - 1;
-		else
-			tail--;
-		unmap_desc(dd, tail);
-	}
-ioerr:
-	ret = -EIO;
-unlock:
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-fail:
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_srq.c b/drivers/staging/rdma/ipath/ipath_srq.c
deleted file mode 100644
index 2627198..0000000
--- a/drivers/staging/rdma/ipath/ipath_srq.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: the first WR to cause a problem is put here
- *
- * This may be called from interrupt context.
- */
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-			   struct ib_recv_wr **bad_wr)
-{
-	struct ipath_srq *srq = to_isrq(ibsrq);
-	struct ipath_rwq *wq;
-	unsigned long flags;
-	int ret;
-
-	for (; wr; wr = wr->next) {
-		struct ipath_rwqe *wqe;
-		u32 next;
-		int i;
-
-		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
-			*bad_wr = wr;
-			ret = -EINVAL;
-			goto bail;
-		}
-
-		spin_lock_irqsave(&srq->rq.lock, flags);
-		wq = srq->rq.wq;
-		next = wq->head + 1;
-		if (next >= srq->rq.size)
-			next = 0;
-		if (next == wq->tail) {
-			spin_unlock_irqrestore(&srq->rq.lock, flags);
-			*bad_wr = wr;
-			ret = -ENOMEM;
-			goto bail;
-		}
-
-		wqe = get_rwqe_ptr(&srq->rq, wq->head);
-		wqe->wr_id = wr->wr_id;
-		wqe->num_sge = wr->num_sge;
-		for (i = 0; i < wr->num_sge; i++)
-			wqe->sg_list[i] = wr->sg_list[i];
-		/* Make sure queue entry is written before the head index. */
-		smp_wmb();
-		wq->head = next;
-		spin_unlock_irqrestore(&srq->rq.lock, flags);
-	}
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
- * @srq_init_attr: the attributes of the SRQ
- * @udata: data from libipathverbs when creating a user SRQ
- */
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
-				struct ib_srq_init_attr *srq_init_attr,
-				struct ib_udata *udata)
-{
-	struct ipath_ibdev *dev = to_idev(ibpd->device);
-	struct ipath_srq *srq;
-	u32 sz;
-	struct ib_srq *ret;
-
-	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
-		ret = ERR_PTR(-ENOSYS);
-		goto done;
-	}
-
-	if (srq_init_attr->attr.max_wr == 0) {
-		ret = ERR_PTR(-EINVAL);
-		goto done;
-	}
-
-	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
-	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
-		ret = ERR_PTR(-EINVAL);
-		goto done;
-	}
-
-	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
-	if (!srq) {
-		ret = ERR_PTR(-ENOMEM);
-		goto done;
-	}
-
-	/*
-	 * Need to use vmalloc() if we want to support large #s of entries.
-	 */
-	srq->rq.size = srq_init_attr->attr.max_wr + 1;
-	srq->rq.max_sge = srq_init_attr->attr.max_sge;
-	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
-		sizeof(struct ipath_rwqe);
-	srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
-	if (!srq->rq.wq) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail_srq;
-	}
-
-	/*
-	 * Return the address of the RWQ as the offset to mmap.
-	 * See ipath_mmap() for details.
-	 */
-	if (udata && udata->outlen >= sizeof(__u64)) {
-		int err;
-		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
-
-		srq->ip =
-		    ipath_create_mmap_info(dev, s,
-					   ibpd->uobject->context,
-					   srq->rq.wq);
-		if (!srq->ip) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_wq;
-		}
-
-		err = ib_copy_to_udata(udata, &srq->ip->offset,
-				       sizeof(srq->ip->offset));
-		if (err) {
-			ret = ERR_PTR(err);
-			goto bail_ip;
-		}
-	} else
-		srq->ip = NULL;
-
-	/*
-	 * ib_create_srq() will initialize srq->ibsrq.
-	 */
-	spin_lock_init(&srq->rq.lock);
-	srq->rq.wq->head = 0;
-	srq->rq.wq->tail = 0;
-	srq->limit = srq_init_attr->attr.srq_limit;
-
-	spin_lock(&dev->n_srqs_lock);
-	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
-		spin_unlock(&dev->n_srqs_lock);
-		ret = ERR_PTR(-ENOMEM);
-		goto bail_ip;
-	}
-
- 	dev->n_srqs_allocated++;
-	spin_unlock(&dev->n_srqs_lock);
-
-	if (srq->ip) {
-		spin_lock_irq(&dev->pending_lock);
-		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
-		spin_unlock_irq(&dev->pending_lock);
-	}
-
-	ret = &srq->ibsrq;
-	goto done;
-
-bail_ip:
-	kfree(srq->ip);
-bail_wq:
-	vfree(srq->rq.wq);
-bail_srq:
-	kfree(srq);
-done:
-	return ret;
-}
-
-/**
- * ipath_modify_srq - modify a shared receive queue
- * @ibsrq: the SRQ to modify
- * @attr: the new attributes of the SRQ
- * @attr_mask: indicates which attributes to modify
- * @udata: user data for ipathverbs.so
- */
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-		     enum ib_srq_attr_mask attr_mask,
-		     struct ib_udata *udata)
-{
-	struct ipath_srq *srq = to_isrq(ibsrq);
-	struct ipath_rwq *wq;
-	int ret = 0;
-
-	if (attr_mask & IB_SRQ_MAX_WR) {
-		struct ipath_rwq *owq;
-		struct ipath_rwqe *p;
-		u32 sz, size, n, head, tail;
-
-		/* Check that the requested sizes are below the limits. */
-		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
-		    ((attr_mask & IB_SRQ_LIMIT) ?
-		     attr->srq_limit : srq->limit) > attr->max_wr) {
-			ret = -EINVAL;
-			goto bail;
-		}
-
-		sz = sizeof(struct ipath_rwqe) +
-			srq->rq.max_sge * sizeof(struct ib_sge);
-		size = attr->max_wr + 1;
-		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
-		if (!wq) {
-			ret = -ENOMEM;
-			goto bail;
-		}
-
-		/* Check that we can write the offset to mmap. */
-		if (udata && udata->inlen >= sizeof(__u64)) {
-			__u64 offset_addr;
-			__u64 offset = 0;
-
-			ret = ib_copy_from_udata(&offset_addr, udata,
-						 sizeof(offset_addr));
-			if (ret)
-				goto bail_free;
-			udata->outbuf =
-				(void __user *) (unsigned long) offset_addr;
-			ret = ib_copy_to_udata(udata, &offset,
-					       sizeof(offset));
-			if (ret)
-				goto bail_free;
-		}
-
-		spin_lock_irq(&srq->rq.lock);
-		/*
-		 * validate head pointer value and compute
-		 * the number of remaining WQEs.
-		 */
-		owq = srq->rq.wq;
-		head = owq->head;
-		if (head >= srq->rq.size)
-			head = 0;
-		tail = owq->tail;
-		if (tail >= srq->rq.size)
-			tail = 0;
-		n = head;
-		if (n < tail)
-			n += srq->rq.size - tail;
-		else
-			n -= tail;
-		if (size <= n) {
-			ret = -EINVAL;
-			goto bail_unlock;
-		}
-		n = 0;
-		p = wq->wq;
-		while (tail != head) {
-			struct ipath_rwqe *wqe;
-			int i;
-
-			wqe = get_rwqe_ptr(&srq->rq, tail);
-			p->wr_id = wqe->wr_id;
-			p->num_sge = wqe->num_sge;
-			for (i = 0; i < wqe->num_sge; i++)
-				p->sg_list[i] = wqe->sg_list[i];
-			n++;
-			p = (struct ipath_rwqe *)((char *) p + sz);
-			if (++tail >= srq->rq.size)
-				tail = 0;
-		}
-		srq->rq.wq = wq;
-		srq->rq.size = size;
-		wq->head = n;
-		wq->tail = 0;
-		if (attr_mask & IB_SRQ_LIMIT)
-			srq->limit = attr->srq_limit;
-		spin_unlock_irq(&srq->rq.lock);
-
-		vfree(owq);
-
-		if (srq->ip) {
-			struct ipath_mmap_info *ip = srq->ip;
-			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
-			u32 s = sizeof(struct ipath_rwq) + size * sz;
-
-			ipath_update_mmap_info(dev, ip, s, wq);
-
-			/*
-			 * Return the offset to mmap.
-			 * See ipath_mmap() for details.
-			 */
-			if (udata && udata->inlen >= sizeof(__u64)) {
-				ret = ib_copy_to_udata(udata, &ip->offset,
-						       sizeof(ip->offset));
-				if (ret)
-					goto bail;
-			}
-
-			spin_lock_irq(&dev->pending_lock);
-			if (list_empty(&ip->pending_mmaps))
-				list_add(&ip->pending_mmaps,
-					 &dev->pending_mmaps);
-			spin_unlock_irq(&dev->pending_lock);
-		}
-	} else if (attr_mask & IB_SRQ_LIMIT) {
-		spin_lock_irq(&srq->rq.lock);
-		if (attr->srq_limit >= srq->rq.size)
-			ret = -EINVAL;
-		else
-			srq->limit = attr->srq_limit;
-		spin_unlock_irq(&srq->rq.lock);
-	}
-	goto bail;
-
-bail_unlock:
-	spin_unlock_irq(&srq->rq.lock);
-bail_free:
-	vfree(wq);
-bail:
-	return ret;
-}
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
-	struct ipath_srq *srq = to_isrq(ibsrq);
-
-	attr->max_wr = srq->rq.size - 1;
-	attr->max_sge = srq->rq.max_sge;
-	attr->srq_limit = srq->limit;
-	return 0;
-}
-
-/**
- * ipath_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
- */
-int ipath_destroy_srq(struct ib_srq *ibsrq)
-{
-	struct ipath_srq *srq = to_isrq(ibsrq);
-	struct ipath_ibdev *dev = to_idev(ibsrq->device);
-
-	spin_lock(&dev->n_srqs_lock);
-	dev->n_srqs_allocated--;
-	spin_unlock(&dev->n_srqs_lock);
-	if (srq->ip)
-		kref_put(&srq->ip->ref, ipath_release_mmap_info);
-	else
-		vfree(srq->rq.wq);
-	kfree(srq);
-
-	return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_stats.c b/drivers/staging/rdma/ipath/ipath_stats.c
deleted file mode 100644
index f63e143..0000000
--- a/drivers/staging/rdma/ipath/ipath_stats.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_kernel.h"
-
-struct infinipath_stats ipath_stats;
-
-/**
- * ipath_snap_cntr - snapshot a chip counter
- * @dd: the infinipath device
- * @creg: the counter to snapshot
- *
- * called from add_timer and user counter read calls, to deal with
- * counters that wrap in "human time".  The words sent and received, and
- * the packets sent and received are all that we worry about.  For now,
- * at least, we don't worry about error counters, because if they wrap
- * that quickly, we probably don't care.  We may eventually just make this
- * handle all the counters.  word counters can wrap in about 20 seconds
- * of full bandwidth traffic, packet counters in a few hours.
- */
-
-u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
-{
-	u32 val, reg64 = 0;
-	u64 val64;
-	unsigned long t0, t1;
-	u64 ret;
-
-	t0 = jiffies;
-	/* If fast increment counters are only 32 bits, snapshot them,
-	 * and maintain them as 64bit values in the driver */
-	if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
-	    (creg == dd->ipath_cregs->cr_wordsendcnt ||
-	     creg == dd->ipath_cregs->cr_wordrcvcnt ||
-	     creg == dd->ipath_cregs->cr_pktsendcnt ||
-	     creg == dd->ipath_cregs->cr_pktrcvcnt)) {
-		val64 = ipath_read_creg(dd, creg);
-		val = val64 == ~0ULL ? ~0U : 0;
-		reg64 = 1;
-	} else			/* val64 just to keep gcc quiet... */
-		val64 = val = ipath_read_creg32(dd, creg);
-	/*
-	 * See if a second has passed.  This is just a way to detect things
-	 * that are quite broken.  Normally this should take just a few
-	 * cycles (the check is for long enough that we don't care if we get
-	 * pre-empted.)  An Opteron HT O read timeout is 4 seconds with
-	 * normal NB values
-	 */
-	t1 = jiffies;
-	if (time_before(t0 + HZ, t1) && val == -1) {
-		ipath_dev_err(dd, "Error!  Read counter 0x%x timed out\n",
-			      creg);
-		ret = 0ULL;
-		goto bail;
-	}
-	if (reg64) {
-		ret = val64;
-		goto bail;
-	}
-
-	if (creg == dd->ipath_cregs->cr_wordsendcnt) {
-		if (val != dd->ipath_lastsword) {
-			dd->ipath_sword += val - dd->ipath_lastsword;
-			dd->ipath_lastsword = val;
-		}
-		val64 = dd->ipath_sword;
-	} else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
-		if (val != dd->ipath_lastrword) {
-			dd->ipath_rword += val - dd->ipath_lastrword;
-			dd->ipath_lastrword = val;
-		}
-		val64 = dd->ipath_rword;
-	} else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
-		if (val != dd->ipath_lastspkts) {
-			dd->ipath_spkts += val - dd->ipath_lastspkts;
-			dd->ipath_lastspkts = val;
-		}
-		val64 = dd->ipath_spkts;
-	} else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
-		if (val != dd->ipath_lastrpkts) {
-			dd->ipath_rpkts += val - dd->ipath_lastrpkts;
-			dd->ipath_lastrpkts = val;
-		}
-		val64 = dd->ipath_rpkts;
-	} else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) {
-		if (dd->ibdeltainprog)
-			val64 -= val64 - dd->ibsymsnap;
-		val64 -= dd->ibsymdelta;
-	} else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) {
-		if (dd->ibdeltainprog)
-			val64 -= val64 - dd->iblnkerrsnap;
-		val64 -= dd->iblnkerrdelta;
-	} else
-		val64 = (u64) val;
-
-	ret = val64;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
- * @dd: the infinipath device
- *
- * print the delta of egrfull/hdrqfull errors for kernel ports no more than
- * every 5 seconds.  User processes are printed at close, but kernel doesn't
- * close, so...  Separate routine so may call from other places someday, and
- * so function name when printed by _IPATH_INFO is meaningfull
- */
-static void ipath_qcheck(struct ipath_devdata *dd)
-{
-	static u64 last_tot_hdrqfull;
-	struct ipath_portdata *pd = dd->ipath_pd[0];
-	size_t blen = 0;
-	char buf[128];
-	u32 hdrqtail;
-
-	*buf = 0;
-	if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
-		blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
-				pd->port_hdrqfull -
-				dd->ipath_p0_hdrqfull);
-		dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
-	}
-	if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
-		blen += snprintf(buf + blen, sizeof buf - blen,
-				 "%srcvegrfull %llu",
-				 blen ? ", " : "",
-				 (unsigned long long)
-				 (ipath_stats.sps_etidfull -
-				  dd->ipath_last_tidfull));
-		dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
-	}
-
-	/*
-	 * this is actually the number of hdrq full interrupts, not actual
-	 * events, but at the moment that's mostly what I'm interested in.
-	 * Actual count, etc. is in the counters, if needed.  For production
-	 * users this won't ordinarily be printed.
-	 */
-
-	if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
-	    ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
-		blen += snprintf(buf + blen, sizeof buf - blen,
-				 "%shdrqfull %llu (all ports)",
-				 blen ? ", " : "",
-				 (unsigned long long)
-				 (ipath_stats.sps_hdrqfull -
-				  last_tot_hdrqfull));
-		last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
-	}
-	if (blen)
-		ipath_dbg("%s\n", buf);
-
-	hdrqtail = ipath_get_hdrqtail(pd);
-	if (pd->port_head != hdrqtail) {
-		if (dd->ipath_lastport0rcv_cnt ==
-		    ipath_stats.sps_port0pkts) {
-			ipath_cdbg(PKT, "missing rcv interrupts? "
-				   "port0 hd=%x tl=%x; port0pkts %llx; write"
-				   " hd (w/intr)\n",
-				   pd->port_head, hdrqtail,
-				   (unsigned long long)
-				   ipath_stats.sps_port0pkts);
-			ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
-				dd->ipath_rhdrhead_intr_off, pd->port_port);
-		}
-		dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
-	}
-}
-
-static void ipath_chk_errormask(struct ipath_devdata *dd)
-{
-	static u32 fixed;
-	u32 ctrl;
-	unsigned long errormask;
-	unsigned long hwerrs;
-
-	if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
-		return;
-
-	errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
-
-	if (errormask == dd->ipath_errormask)
-		return;
-	fixed++;
-
-	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-		dd->ipath_errormask);
-
-	if ((hwerrs & dd->ipath_hwerrmask) ||
-		(ctrl & INFINIPATH_C_FREEZEMODE)) {
-		/* force re-interrupt of pending events, just in case */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-		dev_info(&dd->pcidev->dev,
-			"errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
-			fixed, errormask, (unsigned long)dd->ipath_errormask,
-			ctrl, hwerrs);
-	} else
-		ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
-			fixed, errormask,
-			(unsigned long)dd->ipath_errormask);
-}
-
-
-/**
- * ipath_get_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the infinipath device ipath_devdata
- *
- * called from add_timer
- */
-void ipath_get_faststats(unsigned long opaque)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-	int i;
-	static unsigned cnt;
-	unsigned long flags;
-	u64 traffic_wds;
-
-	/*
-	 * don't access the chip while running diags, or memory diags can
-	 * fail
-	 */
-	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
-	    ipath_diag_inuse)
-		/* but re-arm the timer, for diags case; won't hurt other */
-		goto done;
-
-	/*
-	 * We now try to maintain a "active timer", based on traffic
-	 * exceeding a threshold, so we need to check the word-counts
-	 * even if they are 64-bit.
-	 */
-	traffic_wds = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-	traffic_wds -= dd->ipath_traffic_wds;
-	dd->ipath_traffic_wds += traffic_wds;
-	if (traffic_wds  >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
-		atomic_add(5, &dd->ipath_active_time); /* S/B #define */
-	spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-
-	if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	}
-
-	ipath_qcheck(dd);
-
-	/*
-	 * deal with repeat error suppression.  Doesn't really matter if
-	 * last error was almost a full interval ago, or just a few usecs
-	 * ago; still won't get more than 2 per interval.  We may want
-	 * longer intervals for this eventually, could do with mod, counter
-	 * or separate timer.  Also see code in ipath_handle_errors() and
-	 * ipath_handle_hwerrors().
-	 */
-
-	if (dd->ipath_lasterror)
-		dd->ipath_lasterror = 0;
-	if (dd->ipath_lasthwerror)
-		dd->ipath_lasthwerror = 0;
-	if (dd->ipath_maskederrs
-	    && time_after(jiffies, dd->ipath_unmasktime)) {
-		char ebuf[256];
-		int iserr;
-		iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
-					 dd->ipath_maskederrs);
-		if (dd->ipath_maskederrs &
-		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-		      INFINIPATH_E_PKTERRS))
-			ipath_dev_err(dd, "Re-enabling masked errors "
-				      "(%s)\n", ebuf);
-		else {
-			/*
-			 * rcvegrfull and rcvhdrqfull are "normal", for some
-			 * types of processes (mostly benchmarks) that send
-			 * huge numbers of messages, while not processing
-			 * them.  So only complain about these at debug
-			 * level.
-			 */
-			if (iserr)
-				ipath_dbg(
-					"Re-enabling queue full errors (%s)\n",
-					ebuf);
-			else
-				ipath_cdbg(ERRPKT, "Re-enabling packet"
-					" problem interrupt (%s)\n", ebuf);
-		}
-
-		/* re-enable masked errors */
-		dd->ipath_errormask |= dd->ipath_maskederrs;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-				 dd->ipath_errormask);
-		dd->ipath_maskederrs = 0;
-	}
-
-	/* limit qfull messages to ~one per minute per port */
-	if ((++cnt & 0x10)) {
-		for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
-			struct ipath_portdata *pd = dd->ipath_pd[i];
-
-			if (pd && pd->port_lastrcvhdrqtail != -1)
-				pd->port_lastrcvhdrqtail = -1;
-		}
-	}
-
-	ipath_chk_errormask(dd);
-done:
-	mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_sysfs.c b/drivers/staging/rdma/ipath/ipath_sysfs.c
deleted file mode 100644
index b12b1f6..0000000
--- a/drivers/staging/rdma/ipath/ipath_sysfs.c
+++ /dev/null
@@ -1,1237 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/ctype.h>
-#include <linux/stat.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-/**
- * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
- * @str: the string containing the number
- * @valp: where to put the result
- *
- * returns the number of bytes consumed, or negative value on error
- */
-int ipath_parse_ushort(const char *str, unsigned short *valp)
-{
-	unsigned long val;
-	char *end;
-	int ret;
-
-	if (!isdigit(str[0])) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	val = simple_strtoul(str, &end, 0);
-
-	if (val > 0xffff) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	*valp = val;
-
-	ret = end + 1 - str;
-	if (ret == 0)
-		ret = -EINVAL;
-
-bail:
-	return ret;
-}
-
-static ssize_t show_version(struct device_driver *dev, char *buf)
-{
-	/* The string printed here is already newline-terminated. */
-	return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
-}
-
-static ssize_t show_num_units(struct device_driver *dev, char *buf)
-{
-	return scnprintf(buf, PAGE_SIZE, "%d\n",
-			 ipath_count_units(NULL, NULL, NULL));
-}
-
-static ssize_t show_status(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	ssize_t ret;
-
-	if (!dd->ipath_statusp) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
-			(unsigned long long) *(dd->ipath_statusp));
-
-bail:
-	return ret;
-}
-
-static const char *ipath_status_str[] = {
-	"Initted",
-	"Disabled",
-	"Admin_Disabled",
-	"", /* This used to be the old "OIB_SMA" status. */
-	"", /* This used to be the old "SMA" status. */
-	"Present",
-	"IB_link_up",
-	"IB_configured",
-	"NoIBcable",
-	"Fatal_Hardware_Error",
-	NULL,
-};
-
-static ssize_t show_status_str(struct device *dev,
-			       struct device_attribute *attr,
-			       char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int i, any;
-	u64 s;
-	ssize_t ret;
-
-	if (!dd->ipath_statusp) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	s = *(dd->ipath_statusp);
-	*buf = '\0';
-	for (any = i = 0; s && ipath_status_str[i]; i++) {
-		if (s & 1) {
-			if (any && strlcat(buf, " ", PAGE_SIZE) >=
-			    PAGE_SIZE)
-				/* overflow */
-				break;
-			if (strlcat(buf, ipath_status_str[i],
-				    PAGE_SIZE) >= PAGE_SIZE)
-				break;
-			any = 1;
-		}
-		s >>= 1;
-	}
-	if (any)
-		strlcat(buf, "\n", PAGE_SIZE);
-
-	ret = strlen(buf);
-
-bail:
-	return ret;
-}
-
-static ssize_t show_boardversion(struct device *dev,
-			       struct device_attribute *attr,
-			       char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	/* The string printed here is already newline-terminated. */
-	return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
-}
-
-static ssize_t show_localbus_info(struct device *dev,
-			       struct device_attribute *attr,
-			       char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	/* The string printed here is already newline-terminated. */
-	return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
-}
-
-static ssize_t show_lmc(struct device *dev,
-			struct device_attribute *attr,
-			char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_lmc);
-}
-
-static ssize_t store_lmc(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf,
-			 size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	u16 lmc = 0;
-	int ret;
-
-	ret = ipath_parse_ushort(buf, &lmc);
-	if (ret < 0)
-		goto invalid;
-
-	if (lmc > 7) {
-		ret = -EINVAL;
-		goto invalid;
-	}
-
-	ipath_set_lid(dd, dd->ipath_lid, lmc);
-
-	goto bail;
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid LMC %u\n", lmc);
-bail:
-	return ret;
-}
-
-static ssize_t show_lid(struct device *dev,
-			struct device_attribute *attr,
-			char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid);
-}
-
-static ssize_t store_lid(struct device *dev,
-			 struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	u16 lid = 0;
-	int ret;
-
-	ret = ipath_parse_ushort(buf, &lid);
-	if (ret < 0)
-		goto invalid;
-
-	if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) {
-		ret = -EINVAL;
-		goto invalid;
-	}
-
-	ipath_set_lid(dd, lid, dd->ipath_lmc);
-
-	goto bail;
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid LID 0x%x\n", lid);
-bail:
-	return ret;
-}
-
-static ssize_t show_mlid(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid);
-}
-
-static ssize_t store_mlid(struct device *dev,
-			 struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	u16 mlid;
-	int ret;
-
-	ret = ipath_parse_ushort(buf, &mlid);
-	if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
-		goto invalid;
-
-	dd->ipath_mlid = mlid;
-
-	goto bail;
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid MLID\n");
-bail:
-	return ret;
-}
-
-static ssize_t show_guid(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	u8 *guid;
-
-	guid = (u8 *) & (dd->ipath_guid);
-
-	return scnprintf(buf, PAGE_SIZE,
-			 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
-			 guid[0], guid[1], guid[2], guid[3],
-			 guid[4], guid[5], guid[6], guid[7]);
-}
-
-static ssize_t store_guid(struct device *dev,
-			 struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	ssize_t ret;
-	unsigned short guid[8];
-	__be64 new_guid;
-	u8 *ng;
-	int i;
-
-	if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx",
-		   &guid[0], &guid[1], &guid[2], &guid[3],
-		   &guid[4], &guid[5], &guid[6], &guid[7]) != 8)
-		goto invalid;
-
-	ng = (u8 *) &new_guid;
-
-	for (i = 0; i < 8; i++) {
-		if (guid[i] > 0xff)
-			goto invalid;
-		ng[i] = guid[i];
-	}
-
-	if (new_guid == 0)
-		goto invalid;
-
-	dd->ipath_guid = new_guid;
-	dd->ipath_nguid = 1;
-	if (dd->verbs_dev)
-		dd->verbs_dev->ibdev.node_guid = new_guid;
-
-	ret = strlen(buf);
-	goto bail;
-
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid GUID\n");
-	ret = -EINVAL;
-
-bail:
-	return ret;
-}
-
-static ssize_t show_nguid(struct device *dev,
-			  struct device_attribute *attr,
-			  char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid);
-}
-
-static ssize_t show_nports(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	/* Return the number of user ports available. */
-	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1);
-}
-
-static ssize_t show_serial(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	buf[sizeof dd->ipath_serial] = '\0';
-	memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial);
-	strcat(buf, "\n");
-	return strlen(buf);
-}
-
-static ssize_t show_unit(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
-}
-
-static ssize_t show_jint_max_packets(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
-}
-
-static ssize_t store_jint_max_packets(struct device *dev,
-				      struct device_attribute *attr,
-				      const char *buf,
-				      size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	u16 v = 0;
-	int ret;
-
-	ret = ipath_parse_ushort(buf, &v);
-	if (ret < 0)
-		ipath_dev_err(dd, "invalid jint_max_packets.\n");
-	else
-		dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
-
-	return ret;
-}
-
-static ssize_t show_jint_idle_ticks(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
-}
-
-static ssize_t store_jint_idle_ticks(struct device *dev,
-				     struct device_attribute *attr,
-				     const char *buf,
-				     size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	u16 v = 0;
-	int ret;
-
-	ret = ipath_parse_ushort(buf, &v);
-	if (ret < 0)
-		ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
-	else
-		dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
-
-	return ret;
-}
-
-#define DEVICE_COUNTER(name, attr) \
-	static ssize_t show_counter_##name(struct device *dev, \
-					   struct device_attribute *attr, \
-					   char *buf) \
-	{ \
-		struct ipath_devdata *dd = dev_get_drvdata(dev); \
-		return scnprintf(\
-			buf, PAGE_SIZE, "%llu\n", (unsigned long long) \
-			ipath_snap_cntr( \
-				dd, offsetof(struct infinipath_counters, \
-					     attr) / sizeof(u64)));	\
-	} \
-	static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL);
-
-DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt);
-DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt);
-DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt);
-DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt);
-DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt);
-DEVICE_COUNTER(lb_ints, LBIntCnt);
-DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt);
-DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt);
-DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt);
-DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt);
-DEVICE_COUNTER(rx_dwords, RxDwordCnt);
-DEVICE_COUNTER(rx_ebps, RxEBPCnt);
-DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt);
-DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt);
-DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt);
-DEVICE_COUNTER(rx_len_errs, RxLenErrCnt);
-DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt);
-DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt);
-DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt);
-DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt);
-DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt);
-DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt);
-DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt);
-DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt);
-DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt);
-DEVICE_COUNTER(tx_dwords, TxDwordCnt);
-DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt);
-DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt);
-DEVICE_COUNTER(tx_len_errs, TxLenErrCnt);
-DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt);
-DEVICE_COUNTER(tx_underruns, TxUnderrunCnt);
-DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt);
-
-static struct attribute *dev_counter_attributes[] = {
-	&dev_attr_ib_link_downeds.attr,
-	&dev_attr_ib_link_err_recoveries.attr,
-	&dev_attr_ib_status_changes.attr,
-	&dev_attr_ib_symbol_errs.attr,
-	&dev_attr_lb_flow_stalls.attr,
-	&dev_attr_lb_ints.attr,
-	&dev_attr_rx_bad_formats.attr,
-	&dev_attr_rx_buf_ovfls.attr,
-	&dev_attr_rx_data_pkts.attr,
-	&dev_attr_rx_dropped_pkts.attr,
-	&dev_attr_rx_dwords.attr,
-	&dev_attr_rx_ebps.attr,
-	&dev_attr_rx_flow_ctrl_errs.attr,
-	&dev_attr_rx_flow_pkts.attr,
-	&dev_attr_rx_icrc_errs.attr,
-	&dev_attr_rx_len_errs.attr,
-	&dev_attr_rx_link_problems.attr,
-	&dev_attr_rx_lpcrc_errs.attr,
-	&dev_attr_rx_max_min_len_errs.attr,
-	&dev_attr_rx_p0_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p1_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p2_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p3_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p4_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p5_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p6_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p7_hdr_egr_ovfls.attr,
-	&dev_attr_rx_p8_hdr_egr_ovfls.attr,
-	&dev_attr_rx_pkey_mismatches.attr,
-	&dev_attr_rx_tid_full_errs.attr,
-	&dev_attr_rx_tid_valid_errs.attr,
-	&dev_attr_rx_vcrc_errs.attr,
-	&dev_attr_tx_data_pkts.attr,
-	&dev_attr_tx_dropped_pkts.attr,
-	&dev_attr_tx_dwords.attr,
-	&dev_attr_tx_flow_pkts.attr,
-	&dev_attr_tx_flow_stalls.attr,
-	&dev_attr_tx_len_errs.attr,
-	&dev_attr_tx_max_min_len_errs.attr,
-	&dev_attr_tx_underruns.attr,
-	&dev_attr_tx_unsup_vl_errs.attr,
-	NULL
-};
-
-static struct attribute_group dev_counter_attr_group = {
-	.name = "counters",
-	.attrs = dev_counter_attributes
-};
-
-static ssize_t store_reset(struct device *dev,
-			 struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	if (count < 5 || memcmp(buf, "reset", 5)) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	if (dd->ipath_flags & IPATH_DISABLED) {
-		/*
-		 * post-reset init would re-enable interrupts, etc.
-		 * so don't allow reset on disabled devices.  Not
-		 * perfect error, but about the best choice.
-		 */
-		dev_info(dev,"Unit %d is disabled, can't reset\n",
-			 dd->ipath_unit);
-		ret = -EINVAL;
-		goto bail;
-	}
-	ret = ipath_reset_device(dd->ipath_unit);
-bail:
-	return ret<0 ? ret : count;
-}
-
-static ssize_t store_link_state(struct device *dev,
-			 struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, r;
-	u16 state;
-
-	ret = ipath_parse_ushort(buf, &state);
-	if (ret < 0)
-		goto invalid;
-
-	r = ipath_set_linkstate(dd, state);
-	if (r < 0) {
-		ret = r;
-		goto bail;
-	}
-
-	goto bail;
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid link state\n");
-bail:
-	return ret;
-}
-
-static ssize_t show_mtu(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu);
-}
-
-static ssize_t store_mtu(struct device *dev,
-			 struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	ssize_t ret;
-	u16 mtu = 0;
-	int r;
-
-	ret = ipath_parse_ushort(buf, &mtu);
-	if (ret < 0)
-		goto invalid;
-
-	r = ipath_set_mtu(dd, mtu);
-	if (r < 0)
-		ret = r;
-
-	goto bail;
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid MTU\n");
-bail:
-	return ret;
-}
-
-static ssize_t show_enabled(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	return scnprintf(buf, PAGE_SIZE, "%u\n",
-			 (dd->ipath_flags & IPATH_DISABLED) ? 0 : 1);
-}
-
-static ssize_t store_enabled(struct device *dev,
-			 struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	ssize_t ret;
-	u16 enable = 0;
-
-	ret = ipath_parse_ushort(buf, &enable);
-	if (ret < 0) {
-		ipath_dev_err(dd, "attempt to use non-numeric on enable\n");
-		goto bail;
-	}
-
-	if (enable) {
-		if (!(dd->ipath_flags & IPATH_DISABLED))
-			goto bail;
-
-		dev_info(dev, "Enabling unit %d\n", dd->ipath_unit);
-		/* same as post-reset */
-		ret = ipath_init_chip(dd, 1);
-		if (ret)
-			ipath_dev_err(dd, "Failed to enable unit %d\n",
-				      dd->ipath_unit);
-		else {
-			dd->ipath_flags &= ~IPATH_DISABLED;
-			*dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED;
-		}
-	} else if (!(dd->ipath_flags & IPATH_DISABLED)) {
-		dev_info(dev, "Disabling unit %d\n", dd->ipath_unit);
-		ipath_shutdown_device(dd);
-		dd->ipath_flags |= IPATH_DISABLED;
-		*dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED;
-	}
-
-bail:
-	return ret;
-}
-
-static ssize_t store_rx_pol_inv(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, r;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret < 0)
-		goto invalid;
-
-	r = ipath_set_rx_pol_inv(dd, val);
-	if (r < 0) {
-		ret = r;
-		goto bail;
-	}
-
-	goto bail;
-invalid:
-	ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
-bail:
-	return ret;
-}
-
-static ssize_t store_led_override(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret > 0)
-		ipath_set_led_override(dd, val);
-	else
-		ipath_dev_err(dd, "attempt to set invalid LED override\n");
-	return ret;
-}
-
-static ssize_t show_logged_errs(struct device *dev,
-				struct device_attribute *attr,
-				char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int idx, count;
-
-	/* force consistency with actual EEPROM */
-	if (ipath_update_eeprom_log(dd) != 0)
-		return -ENXIO;
-
-	count = 0;
-	for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-		count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-			dd->ipath_eep_st_errs[idx],
-			idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' ');
-	}
-
-	return count;
-}
-
-/*
- * New sysfs entries to control various IB config. These all turn into
- * accesses via ipath_f_get/set_ib_cfg.
- *
- * Get/Set heartbeat enable. Or of 1=enabled, 2=auto
- */
-static ssize_t show_hrtbt_enb(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
-	if (ret >= 0)
-		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-	return ret;
-}
-
-static ssize_t store_hrtbt_enb(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, r;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret >= 0 && val > 3)
-		ret = -EINVAL;
-	if (ret < 0) {
-		ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
-		goto bail;
-	}
-
-	/*
-	 * Set the "intentional" heartbeat enable per either of
-	 * "Enable" and "Auto", as these are normally set together.
-	 * This bit is consulted when leaving loopback mode,
-	 * because entering loopback mode overrides it and automatically
-	 * disables heartbeat.
-	 */
-	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
-	if (r < 0)
-		ret = r;
-	else if (val == IPATH_IB_HRTBT_OFF)
-		dd->ipath_flags |= IPATH_NO_HRTBT;
-	else
-		dd->ipath_flags &= ~IPATH_NO_HRTBT;
-
-bail:
-	return ret;
-}
-
-/*
- * Get/Set Link-widths enabled. Or of 1=1x, 2=4x (this is human/IB centric,
- * _not_ the particular encoding of any given chip)
- */
-static ssize_t show_lwid_enb(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
-	if (ret >= 0)
-		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-	return ret;
-}
-
-static ssize_t store_lwid_enb(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, r;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret >= 0 && (val == 0 || val > 3))
-		ret = -EINVAL;
-	if (ret < 0) {
-		ipath_dev_err(dd,
-			"attempt to set invalid Link Width (enable)\n");
-		goto bail;
-	}
-
-	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
-	if (r < 0)
-		ret = r;
-
-bail:
-	return ret;
-}
-
-/* Get current link width */
-static ssize_t show_lwid(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
-	if (ret >= 0)
-		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-	return ret;
-}
-
-/*
- * Get/Set Link-speeds enabled. Or of 1=SDR 2=DDR.
- */
-static ssize_t show_spd_enb(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
-	if (ret >= 0)
-		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-	return ret;
-}
-
-static ssize_t store_spd_enb(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, r;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
-		ret = -EINVAL;
-	if (ret < 0) {
-		ipath_dev_err(dd,
-			"attempt to set invalid Link Speed (enable)\n");
-		goto bail;
-	}
-
-	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
-	if (r < 0)
-		ret = r;
-
-bail:
-	return ret;
-}
-
-/* Get current link speed */
-static ssize_t show_spd(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
-	if (ret >= 0)
-		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-	return ret;
-}
-
-/*
- * Get/Set RX polarity-invert enable. 0=no, 1=yes.
- */
-static ssize_t show_rx_polinv_enb(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
-	if (ret >= 0)
-		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-	return ret;
-}
-
-static ssize_t store_rx_polinv_enb(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, r;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret >= 0 && val > 1) {
-		ipath_dev_err(dd,
-			"attempt to set invalid Rx Polarity (enable)\n");
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
-	if (r < 0)
-		ret = r;
-
-bail:
-	return ret;
-}
-
-/*
- * Get/Set RX lane-reversal enable. 0=no, 1=yes.
- */
-static ssize_t show_lanerev_enb(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-
-	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
-	if (ret >= 0)
-		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-	return ret;
-}
-
-static ssize_t store_lanerev_enb(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, r;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret >= 0 && val > 1) {
-		ret = -EINVAL;
-		ipath_dev_err(dd,
-			"attempt to set invalid Lane reversal (enable)\n");
-		goto bail;
-	}
-
-	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
-	if (r < 0)
-		ret = r;
-
-bail:
-	return ret;
-}
-
-static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
-static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
-
-static struct attribute *driver_attributes[] = {
-	&driver_attr_num_units.attr,
-	&driver_attr_version.attr,
-	NULL
-};
-
-static struct attribute_group driver_attr_group = {
-	.attrs = driver_attributes
-};
-
-static ssize_t store_tempsense(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf,
-			       size_t count)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret, stat;
-	u16 val;
-
-	ret = ipath_parse_ushort(buf, &val);
-	if (ret <= 0) {
-		ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
-		goto bail;
-	}
-	/* If anything but the highest limit, enable T_CRIT_A "interrupt" */
-	stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
-	if (stat) {
-		ipath_dev_err(dd, "Unable to set tempsense config\n");
-		ret = -1;
-		goto bail;
-	}
-	stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
-	if (stat) {
-		ipath_dev_err(dd, "Unable to set local Tcrit\n");
-		ret = -1;
-		goto bail;
-	}
-	stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
-	if (stat) {
-		ipath_dev_err(dd, "Unable to set remote Tcrit\n");
-		ret = -1;
-		goto bail;
-	}
-
-bail:
-	return ret;
-}
-
-/*
- * Dump the tempsense registers in decimal, to ease shell-scripting.
- */
-static ssize_t show_tempsense(struct device *dev,
-			      struct device_attribute *attr,
-			      char *buf)
-{
-	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	int ret;
-	int idx;
-	u8 regvals[8];
-
-	ret = -ENXIO;
-	for (idx = 0; idx < 8; ++idx) {
-		if (idx == 6)
-			continue;
-		ret = ipath_tempsense_read(dd, idx);
-		if (ret < 0)
-			break;
-		regvals[idx] = ret;
-	}
-	if (idx == 8)
-		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
-			*(signed char *)(regvals),
-			*(signed char *)(regvals + 1),
-			regvals[2], regvals[3],
-			*(signed char *)(regvals + 5),
-			*(signed char *)(regvals + 7));
-	return ret;
-}
-
-const struct attribute_group *ipath_driver_attr_groups[] = {
-	&driver_attr_group,
-	NULL,
-};
-
-static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
-static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
-static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
-static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state);
-static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid);
-static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu);
-static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled);
-static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL);
-static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
-static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
-static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
-static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
-		   show_jint_max_packets, store_jint_max_packets);
-static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
-		   show_jint_idle_ticks, store_jint_idle_ticks);
-static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
-		   show_tempsense, store_tempsense);
-
-static struct attribute *dev_attributes[] = {
-	&dev_attr_guid.attr,
-	&dev_attr_lmc.attr,
-	&dev_attr_lid.attr,
-	&dev_attr_link_state.attr,
-	&dev_attr_mlid.attr,
-	&dev_attr_mtu.attr,
-	&dev_attr_nguid.attr,
-	&dev_attr_nports.attr,
-	&dev_attr_serial.attr,
-	&dev_attr_status.attr,
-	&dev_attr_status_str.attr,
-	&dev_attr_boardversion.attr,
-	&dev_attr_unit.attr,
-	&dev_attr_enabled.attr,
-	&dev_attr_rx_pol_inv.attr,
-	&dev_attr_led_override.attr,
-	&dev_attr_logged_errors.attr,
-	&dev_attr_tempsense.attr,
-	&dev_attr_localbus_info.attr,
-	NULL
-};
-
-static struct attribute_group dev_attr_group = {
-	.attrs = dev_attributes
-};
-
-static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
-		   store_hrtbt_enb);
-static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
-		   store_lwid_enb);
-static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
-static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
-		   store_spd_enb);
-static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
-static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
-		   store_rx_polinv_enb);
-static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
-		   store_lanerev_enb);
-
-static struct attribute *dev_ibcfg_attributes[] = {
-	&dev_attr_hrtbt_enable.attr,
-	&dev_attr_link_width_enable.attr,
-	&dev_attr_link_width.attr,
-	&dev_attr_link_speed_enable.attr,
-	&dev_attr_link_speed.attr,
-	&dev_attr_rx_pol_inv_enable.attr,
-	&dev_attr_rx_lane_rev_enable.attr,
-	NULL
-};
-
-static struct attribute_group dev_ibcfg_attr_group = {
-	.attrs = dev_ibcfg_attributes
-};
-
-/**
- * ipath_expose_reset - create a device reset file
- * @dev: the device structure
- *
- * Only expose a file that lets us reset the device after someone
- * enters diag mode.  A device reset is quite likely to crash the
- * machine entirely, so we don't want to normally make it
- * available.
- *
- * Called with ipath_mutex held.
- */
-int ipath_expose_reset(struct device *dev)
-{
-	static int exposed;
-	int ret;
-
-	if (!exposed) {
-		ret = device_create_file(dev, &dev_attr_reset);
-		exposed = 1;
-	} else {
-		ret = 0;
-	}
-
-	return ret;
-}
-
-int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
-{
-	int ret;
-
-	ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
-	if (ret)
-		goto bail;
-
-	ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group);
-	if (ret)
-		goto bail_attrs;
-
-	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-		ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
-		if (ret)
-			goto bail_counter;
-		ret = device_create_file(dev, &dev_attr_jint_max_packets);
-		if (ret)
-			goto bail_idle;
-
-		ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
-		if (ret)
-			goto bail_max;
-	}
-
-	return 0;
-
-bail_max:
-	device_remove_file(dev, &dev_attr_jint_max_packets);
-bail_idle:
-	device_remove_file(dev, &dev_attr_jint_idle_ticks);
-bail_counter:
-	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-bail_attrs:
-	sysfs_remove_group(&dev->kobj, &dev_attr_group);
-bail:
-	return ret;
-}
-
-void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
-{
-	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-
-	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-		sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
-		device_remove_file(dev, &dev_attr_jint_idle_ticks);
-		device_remove_file(dev, &dev_attr_jint_max_packets);
-	}
-
-	sysfs_remove_group(&dev->kobj, &dev_attr_group);
-
-	device_remove_file(dev, &dev_attr_reset);
-}
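
The attributes removed above all follow the same show/store shape: the show handler reads a value through the dd->ipath_f_get_ib_cfg chip hook and prints it in decimal, while the store handler parses an unsigned short with ipath_parse_ushort(), range-checks it, and programs the chip through dd->ipath_f_set_ib_cfg. A minimal sketch of that pattern follows; the attribute name "example" and the simplified return-on-success are illustrative only, not part of the removed driver.

static ssize_t show_example(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ipath_devdata *dd = dev_get_drvdata(dev);
	int ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);

	/* On success, print the raw value in decimal, as the removed handlers do. */
	return (ret < 0) ? ret : scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t store_example(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ipath_devdata *dd = dev_get_drvdata(dev);
	u16 val;
	int ret = ipath_parse_ushort(buf, &val);	/* >= 0 on success */

	if (ret < 0 || val == 0 || val > 3)		/* 1x/4x bit mask only */
		return -EINVAL;
	ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
	return (ret < 0) ? ret : count;			/* simplified vs. the originals */
}

static DEVICE_ATTR(example, S_IWUSR | S_IRUGO, show_example, store_example);
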
diff --git a/drivers/staging/rdma/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c
deleted file mode 100644
index 0246b30..0000000
--- a/drivers/staging/rdma/ipath/ipath_uc.c
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/**
- * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_uc_req(struct ipath_qp *qp)
-{
-	struct ipath_other_headers *ohdr;
-	struct ipath_swqe *wqe;
-	unsigned long flags;
-	u32 hwords;
-	u32 bth0;
-	u32 len;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-	int ret = 0;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-		if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-			goto bail;
-		/* We are in the error state, flush the work request. */
-		if (qp->s_last == qp->s_head)
-			goto bail;
-		/* If DMAs are in progress, we can't flush immediately. */
-		if (atomic_read(&qp->s_dma_busy)) {
-			qp->s_flags |= IPATH_S_WAIT_DMA;
-			goto bail;
-		}
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-		goto done;
-	}
-
-	ohdr = &qp->s_hdr.u.oth;
-	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
-
-	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
-	hwords = 5;
-	bth0 = 1 << 22; /* Set M bit */
-
-	/* Get the next send request. */
-	wqe = get_swqe_ptr(qp, qp->s_cur);
-	qp->s_wqe = NULL;
-	switch (qp->s_state) {
-	default:
-		if (!(ib_ipath_state_ops[qp->state] &
-		    IPATH_PROCESS_NEXT_SEND_OK))
-			goto bail;
-		/* Check if send work queue is empty. */
-		if (qp->s_cur == qp->s_head)
-			goto bail;
-		/*
-		 * Start a new request.
-		 */
-		qp->s_psn = wqe->psn = qp->s_next_psn;
-		qp->s_sge.sge = wqe->sg_list[0];
-		qp->s_sge.sg_list = wqe->sg_list + 1;
-		qp->s_sge.num_sge = wqe->wr.num_sge;
-		qp->s_len = len = wqe->length;
-		switch (wqe->wr.opcode) {
-		case IB_WR_SEND:
-		case IB_WR_SEND_WITH_IMM:
-			if (len > pmtu) {
-				qp->s_state = OP(SEND_FIRST);
-				len = pmtu;
-				break;
-			}
-			if (wqe->wr.opcode == IB_WR_SEND)
-				qp->s_state = OP(SEND_ONLY);
-			else {
-				qp->s_state =
-					OP(SEND_ONLY_WITH_IMMEDIATE);
-				/* Immediate data comes after the BTH */
-				ohdr->u.imm_data = wqe->wr.ex.imm_data;
-				hwords += 1;
-			}
-			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-				bth0 |= 1 << 23;
-			qp->s_wqe = wqe;
-			if (++qp->s_cur >= qp->s_size)
-				qp->s_cur = 0;
-			break;
-
-		case IB_WR_RDMA_WRITE:
-		case IB_WR_RDMA_WRITE_WITH_IMM:
-			ohdr->u.rc.reth.vaddr =
-				cpu_to_be64(wqe->rdma_wr.remote_addr);
-			ohdr->u.rc.reth.rkey =
-				cpu_to_be32(wqe->rdma_wr.rkey);
-			ohdr->u.rc.reth.length = cpu_to_be32(len);
-			hwords += sizeof(struct ib_reth) / 4;
-			if (len > pmtu) {
-				qp->s_state = OP(RDMA_WRITE_FIRST);
-				len = pmtu;
-				break;
-			}
-			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-				qp->s_state = OP(RDMA_WRITE_ONLY);
-			else {
-				qp->s_state =
-					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-				/* Immediate data comes after the RETH */
-				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
-				hwords += 1;
-				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-					bth0 |= 1 << 23;
-			}
-			qp->s_wqe = wqe;
-			if (++qp->s_cur >= qp->s_size)
-				qp->s_cur = 0;
-			break;
-
-		default:
-			goto bail;
-		}
-		break;
-
-	case OP(SEND_FIRST):
-		qp->s_state = OP(SEND_MIDDLE);
-		/* FALLTHROUGH */
-	case OP(SEND_MIDDLE):
-		len = qp->s_len;
-		if (len > pmtu) {
-			len = pmtu;
-			break;
-		}
-		if (wqe->wr.opcode == IB_WR_SEND)
-			qp->s_state = OP(SEND_LAST);
-		else {
-			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
-			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.ex.imm_data;
-			hwords += 1;
-		}
-		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-			bth0 |= 1 << 23;
-		qp->s_wqe = wqe;
-		if (++qp->s_cur >= qp->s_size)
-			qp->s_cur = 0;
-		break;
-
-	case OP(RDMA_WRITE_FIRST):
-		qp->s_state = OP(RDMA_WRITE_MIDDLE);
-		/* FALLTHROUGH */
-	case OP(RDMA_WRITE_MIDDLE):
-		len = qp->s_len;
-		if (len > pmtu) {
-			len = pmtu;
-			break;
-		}
-		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-			qp->s_state = OP(RDMA_WRITE_LAST);
-		else {
-			qp->s_state =
-				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
-			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.ex.imm_data;
-			hwords += 1;
-			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-				bth0 |= 1 << 23;
-		}
-		qp->s_wqe = wqe;
-		if (++qp->s_cur >= qp->s_size)
-			qp->s_cur = 0;
-		break;
-	}
-	qp->s_len -= len;
-	qp->s_hdrwords = hwords;
-	qp->s_cur_sge = &qp->s_sge;
-	qp->s_cur_size = len;
-	ipath_make_ruc_header(to_idev(qp->ibqp.device),
-			      qp, ohdr, bth0 | (qp->s_state << 24),
-			      qp->s_next_psn++ & IPATH_PSN_MASK);
-done:
-	ret = 1;
-	goto unlock;
-
-bail:
-	qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	return ret;
-}
-
-/**
- * ipath_uc_rcv - handle an incoming UC packet
- * @dev: the device the packet came in on
- * @hdr: the header of the packet
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
- *
- * This is called from ipath_qp_rcv() to process an incoming UC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-	struct ipath_other_headers *ohdr;
-	int opcode;
-	u32 hdrsize;
-	u32 psn;
-	u32 pad;
-	struct ib_wc wc;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-	struct ib_reth *reth;
-	int header_in_data;
-
-	/* Validate the SLID. See Ch. 9.6.1.5 */
-	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
-		goto done;
-
-	/* Check for GRH */
-	if (!has_grh) {
-		ohdr = &hdr->u.oth;
-		hdrsize = 8 + 12;	/* LRH + BTH */
-		psn = be32_to_cpu(ohdr->bth[2]);
-		header_in_data = 0;
-	} else {
-		ohdr = &hdr->u.l.oth;
-		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
-		/*
-		 * The header with GRH is 60 bytes and the
-		 * core driver sets the eager header buffer
-		 * size to 56 bytes, so the last 4 bytes of
-		 * the BTH header (the PSN) are in the data buffer.
-		 */
-		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-		if (header_in_data) {
-			psn = be32_to_cpu(((__be32 *) data)[0]);
-			data += sizeof(__be32);
-		} else
-			psn = be32_to_cpu(ohdr->bth[2]);
-	}
-	/*
-	 * The opcode is in the low byte when it's in network order
-	 * (top byte when in host order).
-	 */
-	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
-	memset(&wc, 0, sizeof wc);
-
-	/* Compare the PSN against the expected PSN. */
-	if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
-		/*
-		 * Handle a sequence error.
-		 * Silently drop any current message.
-		 */
-		qp->r_psn = psn;
-	inv:
-		qp->r_state = OP(SEND_LAST);
-		switch (opcode) {
-		case OP(SEND_FIRST):
-		case OP(SEND_ONLY):
-		case OP(SEND_ONLY_WITH_IMMEDIATE):
-			goto send_first;
-
-		case OP(RDMA_WRITE_FIRST):
-		case OP(RDMA_WRITE_ONLY):
-		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
-			goto rdma_first;
-
-		default:
-			dev->n_pkt_drops++;
-			goto done;
-		}
-	}
-
-	/* Check for opcode sequence errors. */
-	switch (qp->r_state) {
-	case OP(SEND_FIRST):
-	case OP(SEND_MIDDLE):
-		if (opcode == OP(SEND_MIDDLE) ||
-		    opcode == OP(SEND_LAST) ||
-		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
-			break;
-		goto inv;
-
-	case OP(RDMA_WRITE_FIRST):
-	case OP(RDMA_WRITE_MIDDLE):
-		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
-		    opcode == OP(RDMA_WRITE_LAST) ||
-		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-			break;
-		goto inv;
-
-	default:
-		if (opcode == OP(SEND_FIRST) ||
-		    opcode == OP(SEND_ONLY) ||
-		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
-		    opcode == OP(RDMA_WRITE_FIRST) ||
-		    opcode == OP(RDMA_WRITE_ONLY) ||
-		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-			break;
-		goto inv;
-	}
-
-	/* OK, process the packet. */
-	switch (opcode) {
-	case OP(SEND_FIRST):
-	case OP(SEND_ONLY):
-	case OP(SEND_ONLY_WITH_IMMEDIATE):
-	send_first:
-		if (qp->r_flags & IPATH_R_REUSE_SGE) {
-			qp->r_flags &= ~IPATH_R_REUSE_SGE;
-			qp->r_sge = qp->s_rdma_read_sge;
-		} else if (!ipath_get_rwqe(qp, 0)) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		/* Save the WQE so we can reuse it in case of an error. */
-		qp->s_rdma_read_sge = qp->r_sge;
-		qp->r_rcv_len = 0;
-		if (opcode == OP(SEND_ONLY))
-			goto send_last;
-		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
-			goto send_last_imm;
-		/* FALLTHROUGH */
-	case OP(SEND_MIDDLE):
-		/* Check for invalid length PMTU or posted rwqe len. */
-		if (unlikely(tlen != (hdrsize + pmtu + 4))) {
-			qp->r_flags |= IPATH_R_REUSE_SGE;
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		qp->r_rcv_len += pmtu;
-		if (unlikely(qp->r_rcv_len > qp->r_len)) {
-			qp->r_flags |= IPATH_R_REUSE_SGE;
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		ipath_copy_sge(&qp->r_sge, data, pmtu);
-		break;
-
-	case OP(SEND_LAST_WITH_IMMEDIATE):
-	send_last_imm:
-		if (header_in_data) {
-			wc.ex.imm_data = *(__be32 *) data;
-			data += sizeof(__be32);
-		} else {
-			/* Immediate data comes after BTH */
-			wc.ex.imm_data = ohdr->u.imm_data;
-		}
-		hdrsize += 4;
-		wc.wc_flags = IB_WC_WITH_IMM;
-		/* FALLTHROUGH */
-	case OP(SEND_LAST):
-	send_last:
-		/* Get the number of bytes the message was padded by. */
-		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-		/* Check for invalid length. */
-		/* XXX LAST len should be >= 1 */
-		if (unlikely(tlen < (hdrsize + pad + 4))) {
-			qp->r_flags |= IPATH_R_REUSE_SGE;
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		/* Don't count the CRC. */
-		tlen -= (hdrsize + pad + 4);
-		wc.byte_len = tlen + qp->r_rcv_len;
-		if (unlikely(wc.byte_len > qp->r_len)) {
-			qp->r_flags |= IPATH_R_REUSE_SGE;
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		wc.opcode = IB_WC_RECV;
-	last_imm:
-		ipath_copy_sge(&qp->r_sge, data, tlen);
-		wc.wr_id = qp->r_wr_id;
-		wc.status = IB_WC_SUCCESS;
-		wc.qp = &qp->ibqp;
-		wc.src_qp = qp->remote_qpn;
-		wc.slid = qp->remote_ah_attr.dlid;
-		wc.sl = qp->remote_ah_attr.sl;
-		/* Signal completion event if the solicited bit is set. */
-		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-			       (ohdr->bth[0] &
-				cpu_to_be32(1 << 23)) != 0);
-		break;
-
-	case OP(RDMA_WRITE_FIRST):
-	case OP(RDMA_WRITE_ONLY):
-	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
-	rdma_first:
-		/* RETH comes after BTH */
-		if (!header_in_data)
-			reth = &ohdr->u.rc.reth;
-		else {
-			reth = (struct ib_reth *)data;
-			data += sizeof(*reth);
-		}
-		hdrsize += sizeof(*reth);
-		qp->r_len = be32_to_cpu(reth->length);
-		qp->r_rcv_len = 0;
-		if (qp->r_len != 0) {
-			u32 rkey = be32_to_cpu(reth->rkey);
-			u64 vaddr = be64_to_cpu(reth->vaddr);
-			int ok;
-
-			/* Check rkey */
-			ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
-					   vaddr, rkey,
-					   IB_ACCESS_REMOTE_WRITE);
-			if (unlikely(!ok)) {
-				dev->n_pkt_drops++;
-				goto done;
-			}
-		} else {
-			qp->r_sge.sg_list = NULL;
-			qp->r_sge.sge.mr = NULL;
-			qp->r_sge.sge.vaddr = NULL;
-			qp->r_sge.sge.length = 0;
-			qp->r_sge.sge.sge_length = 0;
-		}
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_WRITE))) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		if (opcode == OP(RDMA_WRITE_ONLY))
-			goto rdma_last;
-		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-			goto rdma_last_imm;
-		/* FALLTHROUGH */
-	case OP(RDMA_WRITE_MIDDLE):
-		/* Check for invalid length PMTU or posted rwqe len. */
-		if (unlikely(tlen != (hdrsize + pmtu + 4))) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		qp->r_rcv_len += pmtu;
-		if (unlikely(qp->r_rcv_len > qp->r_len)) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		ipath_copy_sge(&qp->r_sge, data, pmtu);
-		break;
-
-	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-	rdma_last_imm:
-		if (header_in_data) {
-			wc.ex.imm_data = *(__be32 *) data;
-			data += sizeof(__be32);
-		} else {
-			/* Immediate data comes after BTH */
-			wc.ex.imm_data = ohdr->u.imm_data;
-		}
-		hdrsize += 4;
-		wc.wc_flags = IB_WC_WITH_IMM;
-
-		/* Get the number of bytes the message was padded by. */
-		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-		/* Check for invalid length. */
-		/* XXX LAST len should be >= 1 */
-		if (unlikely(tlen < (hdrsize + pad + 4))) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		/* Don't count the CRC. */
-		tlen -= (hdrsize + pad + 4);
-		if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		if (qp->r_flags & IPATH_R_REUSE_SGE)
-			qp->r_flags &= ~IPATH_R_REUSE_SGE;
-		else if (!ipath_get_rwqe(qp, 1)) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		wc.byte_len = qp->r_len;
-		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-		goto last_imm;
-
-	case OP(RDMA_WRITE_LAST):
-	rdma_last:
-		/* Get the number of bytes the message was padded by. */
-		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-		/* Check for invalid length. */
-		/* XXX LAST len should be >= 1 */
-		if (unlikely(tlen < (hdrsize + pad + 4))) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		/* Don't count the CRC. */
-		tlen -= (hdrsize + pad + 4);
-		if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
-			dev->n_pkt_drops++;
-			goto done;
-		}
-		ipath_copy_sge(&qp->r_sge, data, tlen);
-		break;
-
-	default:
-		/* Drop packet for unknown opcodes. */
-		dev->n_pkt_drops++;
-		goto done;
-	}
-	qp->r_psn++;
-	qp->r_state = opcode;
-done:
-	return;
-}
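
A recurring computation in the receive paths just removed: the pad count occupies bits 21:20 of BTH word 0, and every packet ends in a 4-byte ICRC, so the usable payload is tlen minus the header, the pad, and the CRC. The small helper below is a sketch of that arithmetic only; the helper name is not from the driver.

/*
 * Extract the payload length of a received packet: drop the header,
 * the BTH pad bytes (bits 21:20 of bth0) and the 4-byte ICRC.
 * Returns -EINVAL for runt packets, mirroring the length checks above.
 */
static int payload_length(u32 tlen, u32 hdrsize, __be32 bth0)
{
	u32 pad = (be32_to_cpu(bth0) >> 20) & 3;

	if (tlen < hdrsize + pad + 4)
		return -EINVAL;
	return tlen - (hdrsize + pad + 4);	/* don't count the CRC */
}
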
diff --git a/drivers/staging/rdma/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c
deleted file mode 100644
index 385d941..0000000
--- a/drivers/staging/rdma/ipath/ipath_ud.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from ipath_make_ud_req() to forward a WQE addressed
- * to the same HCA.
- * Note that the receive interrupt handler may be calling ipath_ud_rcv()
- * while this is being called.
- */
-static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
-{
-	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
-	struct ipath_qp *qp;
-	struct ib_ah_attr *ah_attr;
-	unsigned long flags;
-	struct ipath_rq *rq;
-	struct ipath_srq *srq;
-	struct ipath_sge_state rsge;
-	struct ipath_sge *sge;
-	struct ipath_rwq *wq;
-	struct ipath_rwqe *wqe;
-	void (*handler)(struct ib_event *, void *);
-	struct ib_wc wc;
-	u32 tail;
-	u32 rlen;
-	u32 length;
-
-	qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn);
-	if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-		dev->n_pkt_drops++;
-		goto done;
-	}
-
-	/*
-	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
-	 * Qkeys with the high order bit set mean use the
-	 * qkey from the QP context instead of the WR (see 10.2.5).
-	 */
-	if (unlikely(qp->ibqp.qp_num &&
-		     ((int) swqe->ud_wr.remote_qkey < 0 ?
-		      sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) {
-		/* XXX OK to lose a count once in a while. */
-		dev->qkey_violations++;
-		dev->n_pkt_drops++;
-		goto drop;
-	}
-
-	/*
-	 * A GRH is expected to precede the data even if not
-	 * present on the wire.
-	 */
-	length = swqe->length;
-	memset(&wc, 0, sizeof wc);
-	wc.byte_len = length + sizeof(struct ib_grh);
-
-	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
-		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.ex.imm_data = swqe->wr.ex.imm_data;
-	}
-
-	/*
-	 * This would be a lot simpler if we could call ipath_get_rwqe(),
-	 * but that uses state the receive interrupt handler also uses,
-	 * so we would need to lock out receive interrupts while doing
-	 * local loopback.
-	 */
-	if (qp->ibqp.srq) {
-		srq = to_isrq(qp->ibqp.srq);
-		handler = srq->ibsrq.event_handler;
-		rq = &srq->rq;
-	} else {
-		srq = NULL;
-		handler = NULL;
-		rq = &qp->r_rq;
-	}
-
-	/*
-	 * Get the next work request entry to find where to put the data.
-	 * Note that it is safe to drop the lock after changing rq->tail
-	 * since ipath_post_receive() won't fill the empty slot.
-	 */
-	spin_lock_irqsave(&rq->lock, flags);
-	wq = rq->wq;
-	tail = wq->tail;
-	/* Validate tail before using it since it is user writable. */
-	if (tail >= rq->size)
-		tail = 0;
-	if (unlikely(tail == wq->head)) {
-		spin_unlock_irqrestore(&rq->lock, flags);
-		dev->n_pkt_drops++;
-		goto drop;
-	}
-	wqe = get_rwqe_ptr(rq, tail);
-	rsge.sg_list = qp->r_ud_sg_list;
-	if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
-		spin_unlock_irqrestore(&rq->lock, flags);
-		dev->n_pkt_drops++;
-		goto drop;
-	}
-	/* Silently drop packets which are too big. */
-	if (wc.byte_len > rlen) {
-		spin_unlock_irqrestore(&rq->lock, flags);
-		dev->n_pkt_drops++;
-		goto drop;
-	}
-	if (++tail >= rq->size)
-		tail = 0;
-	wq->tail = tail;
-	wc.wr_id = wqe->wr_id;
-	if (handler) {
-		u32 n;
-
-		/*
-		 * validate head pointer value and compute
-		 * the number of remaining WQEs.
-		 */
-		n = wq->head;
-		if (n >= rq->size)
-			n = 0;
-		if (n < tail)
-			n += rq->size - tail;
-		else
-			n -= tail;
-		if (n < srq->limit) {
-			struct ib_event ev;
-
-			srq->limit = 0;
-			spin_unlock_irqrestore(&rq->lock, flags);
-			ev.device = qp->ibqp.device;
-			ev.element.srq = qp->ibqp.srq;
-			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-			handler(&ev, srq->ibsrq.srq_context);
-		} else
-			spin_unlock_irqrestore(&rq->lock, flags);
-	} else
-		spin_unlock_irqrestore(&rq->lock, flags);
-
-	ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
-	if (ah_attr->ah_flags & IB_AH_GRH) {
-		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
-		wc.wc_flags |= IB_WC_GRH;
-	} else
-		ipath_skip_sge(&rsge, sizeof(struct ib_grh));
-	sge = swqe->sg_list;
-	while (length) {
-		u32 len = sge->length;
-
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		ipath_copy_sge(&rsge, sge->vaddr, len);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (--swqe->wr.num_sge)
-				sge++;
-		} else if (sge->length == 0 && sge->mr != NULL) {
-			if (++sge->n >= IPATH_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		length -= len;
-	}
-	wc.status = IB_WC_SUCCESS;
-	wc.opcode = IB_WC_RECV;
-	wc.qp = &qp->ibqp;
-	wc.src_qp = sqp->ibqp.qp_num;
-	/* XXX do we know which pkey matched? Only needed for GSI. */
-	wc.pkey_index = 0;
-	wc.slid = dev->dd->ipath_lid |
-		(ah_attr->src_path_bits &
-		 ((1 << dev->dd->ipath_lmc) - 1));
-	wc.sl = ah_attr->sl;
-	wc.dlid_path_bits =
-		ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
-	wc.port_num = 1;
-	/* Signal completion event if the solicited bit is set. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-		       swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED);
-drop:
-	if (atomic_dec_and_test(&qp->refcount))
-		wake_up(&qp->wait);
-done:;
-}
-
-/**
- * ipath_make_ud_req - construct a UD request packet
- * @qp: the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_ud_req(struct ipath_qp *qp)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_other_headers *ohdr;
-	struct ib_ah_attr *ah_attr;
-	struct ipath_swqe *wqe;
-	unsigned long flags;
-	u32 nwords;
-	u32 extra_bytes;
-	u32 bth0;
-	u16 lrh0;
-	u16 lid;
-	int ret = 0;
-	int next_cur;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
-		if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-			goto bail;
-		/* We are in the error state, flush the work request. */
-		if (qp->s_last == qp->s_head)
-			goto bail;
-		/* If DMAs are in progress, we can't flush immediately. */
-		if (atomic_read(&qp->s_dma_busy)) {
-			qp->s_flags |= IPATH_S_WAIT_DMA;
-			goto bail;
-		}
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-		goto done;
-	}
-
-	if (qp->s_cur == qp->s_head)
-		goto bail;
-
-	wqe = get_swqe_ptr(qp, qp->s_cur);
-	next_cur = qp->s_cur + 1;
-	if (next_cur >= qp->s_size)
-		next_cur = 0;
-
-	/* Construct the header. */
-	ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
-	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
-		if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
-			dev->n_multicast_xmit++;
-		else
-			dev->n_unicast_xmit++;
-	} else {
-		dev->n_unicast_xmit++;
-		lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
-		if (unlikely(lid == dev->dd->ipath_lid)) {
-			/*
-			 * If DMAs are in progress, we can't generate
-			 * a completion for the loopback packet since
-			 * it would be out of order.
-			 * XXX Instead of waiting, we could queue a
-			 * zero length descriptor so we get a callback.
-			 */
-			if (atomic_read(&qp->s_dma_busy)) {
-				qp->s_flags |= IPATH_S_WAIT_DMA;
-				goto bail;
-			}
-			qp->s_cur = next_cur;
-			spin_unlock_irqrestore(&qp->s_lock, flags);
-			ipath_ud_loopback(qp, wqe);
-			spin_lock_irqsave(&qp->s_lock, flags);
-			ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
-			goto done;
-		}
-	}
-
-	qp->s_cur = next_cur;
-	extra_bytes = -wqe->length & 3;
-	nwords = (wqe->length + extra_bytes) >> 2;
-
-	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
-	qp->s_hdrwords = 7;
-	qp->s_cur_size = wqe->length;
-	qp->s_cur_sge = &qp->s_sge;
-	qp->s_dmult = ah_attr->static_rate;
-	qp->s_wqe = wqe;
-	qp->s_sge.sge = wqe->sg_list[0];
-	qp->s_sge.sg_list = wqe->sg_list + 1;
-	qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge;
-
-	if (ah_attr->ah_flags & IB_AH_GRH) {
-		/* Header size in 32-bit words. */
-		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
-						 &ah_attr->grh,
-						 qp->s_hdrwords, nwords);
-		lrh0 = IPATH_LRH_GRH;
-		ohdr = &qp->s_hdr.u.l.oth;
-		/*
-		 * Don't worry about sending to locally attached multicast
-		 * QPs; the spec leaves what happens unspecified.
-		 */
-	} else {
-		/* Header size in 32-bit words. */
-		lrh0 = IPATH_LRH_BTH;
-		ohdr = &qp->s_hdr.u.oth;
-	}
-	if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) {
-		qp->s_hdrwords++;
-		ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data;
-		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
-	} else
-		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
-	lrh0 |= ah_attr->sl << 4;
-	if (qp->ibqp.qp_type == IB_QPT_SMI)
-		lrh0 |= 0xF000;	/* Set VL (see ch. 13.5.3.1) */
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
-					   SIZE_OF_CRC);
-	lid = dev->dd->ipath_lid;
-	if (lid) {
-		lid |= ah_attr->src_path_bits &
-			((1 << dev->dd->ipath_lmc) - 1);
-		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
-	} else
-		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
-	if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED)
-		bth0 |= 1 << 23;
-	bth0 |= extra_bytes << 20;
-	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
-		ipath_get_pkey(dev->dd, qp->s_pkey_index);
-	ohdr->bth[0] = cpu_to_be32(bth0);
-	/*
-	 * Use the multicast QP if the destination LID is a multicast LID.
-	 */
-	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
-		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-		cpu_to_be32(IPATH_MULTICAST_QPN) :
-		cpu_to_be32(wqe->ud_wr.remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
-	/*
-	 * Qkeys with the high order bit set mean use the
-	 * qkey from the QP context instead of the WR (see 10.2.5).
-	 */
-	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
-					 qp->qkey : wqe->ud_wr.remote_qkey);
-	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-
-done:
-	ret = 1;
-	goto unlock;
-
-bail:
-	qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	return ret;
-}
-
-/**
- * ipath_ud_rcv - receive an incoming UD packet
- * @dev: the device the packet came in on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-	struct ipath_other_headers *ohdr;
-	int opcode;
-	u32 hdrsize;
-	u32 pad;
-	struct ib_wc wc;
-	u32 qkey;
-	u32 src_qp;
-	u16 dlid;
-	int header_in_data;
-
-	/* Check for GRH */
-	if (!has_grh) {
-		ohdr = &hdr->u.oth;
-		hdrsize = 8 + 12 + 8;	/* LRH + BTH + DETH */
-		qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
-		src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
-		header_in_data = 0;
-	} else {
-		ohdr = &hdr->u.l.oth;
-		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
-		/*
-		 * The header with GRH is 68 bytes and the core driver sets
-		 * the eager header buffer size to 56 bytes, so the last 12
-		 * bytes of the IB header are in the data buffer.
-		 */
-		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-		if (header_in_data) {
-			qkey = be32_to_cpu(((__be32 *) data)[1]);
-			src_qp = be32_to_cpu(((__be32 *) data)[2]);
-			data += 12;
-		} else {
-			qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
-			src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
-		}
-	}
-	src_qp &= IPATH_QPN_MASK;
-
-	/*
-	 * Check that the permissive LID is only used on QP0
-	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
-	 */
-	if (qp->ibqp.qp_num) {
-		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-			dev->n_pkt_drops++;
-			goto bail;
-		}
-		if (unlikely(qkey != qp->qkey)) {
-			/* XXX OK to lose a count once in a while. */
-			dev->qkey_violations++;
-			dev->n_pkt_drops++;
-			goto bail;
-		}
-	} else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
-		   hdr->lrh[3] == IB_LID_PERMISSIVE) {
-		struct ib_smp *smp = (struct ib_smp *) data;
-
-		if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-			dev->n_pkt_drops++;
-			goto bail;
-		}
-	}
-
-	/*
-	 * The opcode is in the low byte when it's in network order
-	 * (top byte when in host order).
-	 */
-	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-	if (qp->ibqp.qp_num > 1 &&
-	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
-		if (header_in_data) {
-			wc.ex.imm_data = *(__be32 *) data;
-			data += sizeof(__be32);
-		} else
-			wc.ex.imm_data = ohdr->u.ud.imm_data;
-		wc.wc_flags = IB_WC_WITH_IMM;
-		hdrsize += sizeof(u32);
-	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
-		wc.ex.imm_data = 0;
-		wc.wc_flags = 0;
-	} else {
-		dev->n_pkt_drops++;
-		goto bail;
-	}
-
-	/* Get the number of bytes the message was padded by. */
-	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-	if (unlikely(tlen < (hdrsize + pad + 4))) {
-		/* Drop incomplete packets. */
-		dev->n_pkt_drops++;
-		goto bail;
-	}
-	tlen -= hdrsize + pad + 4;
-
-	/* Drop invalid MAD packets (see 13.5.3.1). */
-	if (unlikely((qp->ibqp.qp_num == 0 &&
-		      (tlen != 256 ||
-		       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
-		     (qp->ibqp.qp_num == 1 &&
-		      (tlen != 256 ||
-		       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
-		dev->n_pkt_drops++;
-		goto bail;
-	}
-
-	/*
-	 * A GRH is expected to precede the data even if not
-	 * present on the wire.
-	 */
-	wc.byte_len = tlen + sizeof(struct ib_grh);
-
-	/*
-	 * Get the next work request entry to find where to put the data.
-	 */
-	if (qp->r_flags & IPATH_R_REUSE_SGE)
-		qp->r_flags &= ~IPATH_R_REUSE_SGE;
-	else if (!ipath_get_rwqe(qp, 0)) {
-		/*
-		 * Count VL15 packets dropped due to no receive buffer.
-		 * Otherwise, count them as buffer overruns since usually,
-		 * the HW will be able to receive packets even if there are
-		 * no QPs with posted receive buffers.
-		 */
-		if (qp->ibqp.qp_num == 0)
-			dev->n_vl15_dropped++;
-		else
-			dev->rcv_errors++;
-		goto bail;
-	}
-	/* Silently drop packets which are too big. */
-	if (wc.byte_len > qp->r_len) {
-		qp->r_flags |= IPATH_R_REUSE_SGE;
-		dev->n_pkt_drops++;
-		goto bail;
-	}
-	if (has_grh) {
-		ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
-			       sizeof(struct ib_grh));
-		wc.wc_flags |= IB_WC_GRH;
-	} else
-		ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
-	ipath_copy_sge(&qp->r_sge, data,
-		       wc.byte_len - sizeof(struct ib_grh));
-	if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-		goto bail;
-	wc.wr_id = qp->r_wr_id;
-	wc.status = IB_WC_SUCCESS;
-	wc.opcode = IB_WC_RECV;
-	wc.vendor_err = 0;
-	wc.qp = &qp->ibqp;
-	wc.src_qp = src_qp;
-	/* XXX do we know which pkey matched? Only needed for GSI. */
-	wc.pkey_index = 0;
-	wc.slid = be16_to_cpu(hdr->lrh[3]);
-	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
-	dlid = be16_to_cpu(hdr->lrh[1]);
-	/*
-	 * Save the LMC lower bits if the destination LID is a unicast LID.
-	 */
-	wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
-		dlid & ((1 << dev->dd->ipath_lmc) - 1);
-	wc.port_num = 1;
-	/* Signal completion event if the solicited bit is set. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-		       (ohdr->bth[0] &
-			cpu_to_be32(1 << 23)) != 0);
-
-bail:;
-}
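
Both the loopback path and the receive path removed above apply the IBTA 10.2.5 Q_Key rule by treating the 32-bit qkey as signed: if the high-order bit is set the value reads as negative, and the QP's own qkey is used instead of the one carried in the work request. In isolation the selection amounts to the sketch below; the helper name is illustrative.

/*
 * Effective Q_Key per IBTA 10.2.5: a WR qkey with the high-order bit
 * set (negative when cast to int) means "use the QP context's qkey".
 */
static u32 effective_qkey(u32 wr_qkey, u32 qp_qkey)
{
	return ((int)wr_qkey < 0) ? qp_qkey : wr_qkey;
}
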
diff --git a/drivers/staging/rdma/ipath/ipath_user_pages.c b/drivers/staging/rdma/ipath/ipath_user_pages.c
deleted file mode 100644
index d29b4da..0000000
--- a/drivers/staging/rdma/ipath/ipath_user_pages.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-static void __ipath_release_user_pages(struct page **p, size_t num_pages,
-				   int dirty)
-{
-	size_t i;
-
-	for (i = 0; i < num_pages; i++) {
-		ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
-			   (unsigned long) num_pages, p[i]);
-		if (dirty)
-			set_page_dirty_lock(p[i]);
-		put_page(p[i]);
-	}
-}
-
-/* call with current->mm->mmap_sem held */
-static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-				  struct page **p)
-{
-	unsigned long lock_limit;
-	size_t got;
-	int ret;
-
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	if (num_pages > lock_limit) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
-		   (unsigned long) num_pages, start_page);
-
-	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages(current, current->mm,
-				     start_page + got * PAGE_SIZE,
-				     num_pages - got, 1, 1,
-				     p + got, NULL);
-		if (ret < 0)
-			goto bail_release;
-	}
-
-	current->mm->pinned_vm += num_pages;
-
-	ret = 0;
-	goto bail;
-
-bail_release:
-	__ipath_release_user_pages(p, got, 0);
-bail:
-	return ret;
-}
-
-/**
- * ipath_map_page - a safety wrapper around pci_map_page()
- *
- * A dma_addr of all 0's is interpreted by the chip as "disabled".
- * Unfortunately, it can also be a valid dma_addr returned on some
- * architectures.
- *
- * The powerpc iommu assigns dma_addrs in ascending order, so we don't
- * have to bother with retries or mapping a dummy page to ensure we
- * don't just get the same mapping again.
- *
- * I'm sure we won't be so lucky with other iommu's, so FIXME.
- */
-dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
-	unsigned long offset, size_t size, int direction)
-{
-	dma_addr_t phys;
-
-	phys = pci_map_page(hwdev, page, offset, size, direction);
-
-	if (phys == 0) {
-		pci_unmap_page(hwdev, phys, size, direction);
-		phys = pci_map_page(hwdev, page, offset, size, direction);
-		/*
-		 * FIXME: If we get 0 again, we should keep this page,
-		 * map another, then free the 0 page.
-		 */
-	}
-
-	return phys;
-}
-
-/**
- * ipath_map_single - a safety wrapper around pci_map_single()
- *
- * Same idea as ipath_map_page().
- */
-dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-	int direction)
-{
-	dma_addr_t phys;
-
-	phys = pci_map_single(hwdev, ptr, size, direction);
-
-	if (phys == 0) {
-		pci_unmap_single(hwdev, phys, size, direction);
-		phys = pci_map_single(hwdev, ptr, size, direction);
-		/*
-		 * FIXME: If we get 0 again, we should keep this page,
-		 * map another, then free the 0 page.
-		 */
-	}
-
-	return phys;
-}
-
-/**
- * ipath_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages.  For
- * now, num_pages is always 1, but that will probably change at some point
- * (because the caller is doing expected sends on a single virtually
- * contiguous buffer, so we can do all pages at once).
- */
-int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-			 struct page **p)
-{
-	int ret;
-
-	down_write(&current->mm->mmap_sem);
-
-	ret = __ipath_get_user_pages(start_page, num_pages, p);
-
-	up_write(&current->mm->mmap_sem);
-
-	return ret;
-}
-
-void ipath_release_user_pages(struct page **p, size_t num_pages)
-{
-	down_write(&current->mm->mmap_sem);
-
-	__ipath_release_user_pages(p, num_pages, 1);
-
-	current->mm->pinned_vm -= num_pages;
-
-	up_write(&current->mm->mmap_sem);
-}
-
-struct ipath_user_pages_work {
-	struct work_struct work;
-	struct mm_struct *mm;
-	unsigned long num_pages;
-};
-
-static void user_pages_account(struct work_struct *_work)
-{
-	struct ipath_user_pages_work *work =
-		container_of(_work, struct ipath_user_pages_work, work);
-
-	down_write(&work->mm->mmap_sem);
-	work->mm->pinned_vm -= work->num_pages;
-	up_write(&work->mm->mmap_sem);
-	mmput(work->mm);
-	kfree(work);
-}
-
-void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
-{
-	struct ipath_user_pages_work *work;
-	struct mm_struct *mm;
-
-	__ipath_release_user_pages(p, num_pages, 1);
-
-	mm = get_task_mm(current);
-	if (!mm)
-		return;
-
-	work = kmalloc(sizeof(*work), GFP_KERNEL);
-	if (!work)
-		goto bail_mm;
-
-	INIT_WORK(&work->work, user_pages_account);
-	work->mm = mm;
-	work->num_pages = num_pages;
-
-	queue_work(ib_wq, &work->work);
-	return;
-
-bail_mm:
-	mmput(mm);
-	return;
-}
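
The pinning helper removed above enforces RLIMIT_MEMLOCK before calling get_user_pages(): the byte limit is converted to pages with PAGE_SHIFT and compared against the request, and successful pins are charged to mm->pinned_vm. The limit check on its own looks like the sketch below; the helper name is illustrative, and the locking and actual pinning are as in the original.

/*
 * Reject a pin request that would exceed the caller's locked-memory
 * limit.  rlimit() reports bytes, so shift down to pages first.
 */
static int check_memlock_limit(size_t num_pages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	return (num_pages > lock_limit) ? -ENOMEM : 0;
}
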
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.c b/drivers/staging/rdma/ipath/ipath_user_sdma.c
deleted file mode 100644
index 8c12e3c..0000000
--- a/drivers/staging/rdma/ipath/ipath_user_sdma.c
+++ /dev/null
@@ -1,874 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_user_sdma.h"
-
-/* minimum size of header */
-#define IPATH_USER_SDMA_MIN_HEADER_LENGTH	64
-/* expected size of headers (for dma_pool) */
-#define IPATH_USER_SDMA_EXP_HEADER_LENGTH	64
-/* length mask in PBC (lower 11 bits) */
-#define IPATH_PBC_LENGTH_MASK			((1 << 11) - 1)
-
-struct ipath_user_sdma_pkt {
-	u8 naddr;		/* dimension of addr (1..3) ... */
-	u32 counter;		/* sdma pkts queued counter for this entry */
-	u64 added;		/* global descq number of entries */
-
-	struct {
-		u32 offset;			/* offset for kvaddr, addr */
-		u32 length;			/* length in page */
-		u8  put_page;			/* should we put_page? */
-		u8  dma_mapped;			/* is page dma_mapped? */
-		struct page *page;		/* may be NULL (coherent mem) */
-		void *kvaddr;			/* FIXME: only for pio hack */
-		dma_addr_t addr;
-	} addr[4];   /* max pages, any more and we coalesce */
-	struct list_head list;	/* list element */
-};
-
-struct ipath_user_sdma_queue {
-	/*
-	 * pkts sent to dma engine are queued on this
-	 * list head.  the type of the elements of this
-	 * list are struct ipath_user_sdma_pkt...
-	 */
-	struct list_head sent;
-
-	/* headers with expected length are allocated from here... */
-	char header_cache_name[64];
-	struct dma_pool *header_cache;
-
-	/* packets are allocated from the slab cache... */
-	char pkt_slab_name[64];
-	struct kmem_cache *pkt_slab;
-
-	/* as packets go on the queued queue, they are counted... */
-	u32 counter;
-	u32 sent_counter;
-
-	/* dma page table */
-	struct rb_root dma_pages_root;
-
-	/* protect everything above... */
-	struct mutex lock;
-};
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
-{
-	struct ipath_user_sdma_queue *pq =
-		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
-
-	if (!pq)
-		goto done;
-
-	pq->counter = 0;
-	pq->sent_counter = 0;
-	INIT_LIST_HEAD(&pq->sent);
-
-	mutex_init(&pq->lock);
-
-	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
-		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
-	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
-					 sizeof(struct ipath_user_sdma_pkt),
-					 0, 0, NULL);
-
-	if (!pq->pkt_slab)
-		goto err_kfree;
-
-	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
-		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
-	pq->header_cache = dma_pool_create(pq->header_cache_name,
-					   dev,
-					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
-					   4, 0);
-	if (!pq->header_cache)
-		goto err_slab;
-
-	pq->dma_pages_root = RB_ROOT;
-
-	goto done;
-
-err_slab:
-	kmem_cache_destroy(pq->pkt_slab);
-err_kfree:
-	kfree(pq);
-	pq = NULL;
-
-done:
-	return pq;
-}
-
-static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
-				      int i, size_t offset, size_t len,
-				      int put_page, int dma_mapped,
-				      struct page *page,
-				      void *kvaddr, dma_addr_t dma_addr)
-{
-	pkt->addr[i].offset = offset;
-	pkt->addr[i].length = len;
-	pkt->addr[i].put_page = put_page;
-	pkt->addr[i].dma_mapped = dma_mapped;
-	pkt->addr[i].page = page;
-	pkt->addr[i].kvaddr = kvaddr;
-	pkt->addr[i].addr = dma_addr;
-}
-
-static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
-					u32 counter, size_t offset,
-					size_t len, int dma_mapped,
-					struct page *page,
-					void *kvaddr, dma_addr_t dma_addr)
-{
-	pkt->naddr = 1;
-	pkt->counter = counter;
-	ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
-				  kvaddr, dma_addr);
-}
-
-/* we have too many pages in the iovec; coalesce to a single page */
-static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
-				    struct ipath_user_sdma_pkt *pkt,
-				    const struct iovec *iov,
-				    unsigned long niov)
-{
-	int ret = 0;
-	struct page *page = alloc_page(GFP_KERNEL);
-	void *mpage_save;
-	char *mpage;
-	int i;
-	int len = 0;
-	dma_addr_t dma_addr;
-
-	if (!page) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	mpage = kmap(page);
-	mpage_save = mpage;
-	for (i = 0; i < niov; i++) {
-		int cfur;
-
-		cfur = copy_from_user(mpage,
-				      iov[i].iov_base, iov[i].iov_len);
-		if (cfur) {
-			ret = -EFAULT;
-			goto free_unmap;
-		}
-
-		mpage += iov[i].iov_len;
-		len += iov[i].iov_len;
-	}
-
-	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
-				DMA_TO_DEVICE);
-	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-		ret = -ENOMEM;
-		goto free_unmap;
-	}
-
-	ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
-				  dma_addr);
-	pkt->naddr = 2;
-
-	goto done;
-
-free_unmap:
-	kunmap(page);
-	__free_page(page);
-done:
-	return ret;
-}
-
-/* how many pages in this iovec element? */
-static int ipath_user_sdma_num_pages(const struct iovec *iov)
-{
-	const unsigned long addr  = (unsigned long) iov->iov_base;
-	const unsigned long  len  = iov->iov_len;
-	const unsigned long spage = addr & PAGE_MASK;
-	const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
-	return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
-
-/* truncate length to page boundary */
-static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
-{
-	const unsigned long offset = offset_in_page(addr);
-
-	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
-}
-
-static void ipath_user_sdma_free_pkt_frag(struct device *dev,
-					  struct ipath_user_sdma_queue *pq,
-					  struct ipath_user_sdma_pkt *pkt,
-					  int frag)
-{
-	const int i = frag;
-
-	if (pkt->addr[i].page) {
-		if (pkt->addr[i].dma_mapped)
-			dma_unmap_page(dev,
-				       pkt->addr[i].addr,
-				       pkt->addr[i].length,
-				       DMA_TO_DEVICE);
-
-		if (pkt->addr[i].kvaddr)
-			kunmap(pkt->addr[i].page);
-
-		if (pkt->addr[i].put_page)
-			put_page(pkt->addr[i].page);
-		else
-			__free_page(pkt->addr[i].page);
-	} else if (pkt->addr[i].kvaddr)
-		/* free coherent mem from cache... */
-		dma_pool_free(pq->header_cache,
-			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
-}
-
-/* return number of pages pinned... */
-static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
-				     struct ipath_user_sdma_pkt *pkt,
-				     unsigned long addr, int tlen, int npages)
-{
-	struct page *pages[2];
-	int j;
-	int ret;
-
-	ret = get_user_pages_fast(addr, npages, 0, pages);
-	if (ret != npages) {
-		int i;
-
-		for (i = 0; i < ret; i++)
-			put_page(pages[i]);
-
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	for (j = 0; j < npages; j++) {
-		/* map the pages... */
-		const int flen =
-			ipath_user_sdma_page_length(addr, tlen);
-		dma_addr_t dma_addr =
-			dma_map_page(&dd->pcidev->dev,
-				     pages[j], 0, flen, DMA_TO_DEVICE);
-		unsigned long fofs = offset_in_page(addr);
-
-		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-			ret = -ENOMEM;
-			goto done;
-		}
-
-		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
-					  pages[j], kmap(pages[j]),
-					  dma_addr);
-
-		pkt->naddr++;
-		addr += flen;
-		tlen -= flen;
-	}
-
-done:
-	return ret;
-}
-
-static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
-				   struct ipath_user_sdma_queue *pq,
-				   struct ipath_user_sdma_pkt *pkt,
-				   const struct iovec *iov,
-				   unsigned long niov)
-{
-	int ret = 0;
-	unsigned long idx;
-
-	for (idx = 0; idx < niov; idx++) {
-		const int npages = ipath_user_sdma_num_pages(iov + idx);
-		const unsigned long addr = (unsigned long) iov[idx].iov_base;
-
-		ret = ipath_user_sdma_pin_pages(dd, pkt,
-						addr, iov[idx].iov_len,
-						npages);
-		if (ret < 0)
-			goto free_pkt;
-	}
-
-	goto done;
-
-free_pkt:
-	for (idx = 0; idx < pkt->naddr; idx++)
-		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
-
-done:
-	return ret;
-}
-
-static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
-					struct ipath_user_sdma_queue *pq,
-					struct ipath_user_sdma_pkt *pkt,
-					const struct iovec *iov,
-					unsigned long niov, int npages)
-{
-	int ret = 0;
-
-	if (npages >= ARRAY_SIZE(pkt->addr))
-		ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
-	else
-		ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
-
-	return ret;
-}
-
-/* free a packet list */
-static void ipath_user_sdma_free_pkt_list(struct device *dev,
-					  struct ipath_user_sdma_queue *pq,
-					  struct list_head *list)
-{
-	struct ipath_user_sdma_pkt *pkt, *pkt_next;
-
-	list_for_each_entry_safe(pkt, pkt_next, list, list) {
-		int i;
-
-		for (i = 0; i < pkt->naddr; i++)
-			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
-
-		kmem_cache_free(pq->pkt_slab, pkt);
-	}
-}
-
-/*
- * copy headers, coalesce etc -- pq->lock must be held
- *
- * we queue all the packets to list, returning the number
- * of iovec entries consumed.  list must be empty initially,
- * as we clean it if there is an error...
- */
-static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
-				      struct ipath_user_sdma_queue *pq,
-				      struct list_head *list,
-				      const struct iovec *iov,
-				      unsigned long niov,
-				      int maxpkts)
-{
-	unsigned long idx = 0;
-	int ret = 0;
-	int npkts = 0;
-	struct page *page = NULL;
-	__le32 *pbc;
-	dma_addr_t dma_addr;
-	struct ipath_user_sdma_pkt *pkt = NULL;
-	size_t len;
-	size_t nw;
-	u32 counter = pq->counter;
-	int dma_mapped = 0;
-
-	while (idx < niov && npkts < maxpkts) {
-		const unsigned long addr = (unsigned long) iov[idx].iov_base;
-		const unsigned long idx_save = idx;
-		unsigned pktnw;
-		unsigned pktnwc;
-		int nfrags = 0;
-		int npages = 0;
-		int cfur;
-
-		dma_mapped = 0;
-		len = iov[idx].iov_len;
-		nw = len >> 2;
-		page = NULL;
-
-		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
-		if (!pkt) {
-			ret = -ENOMEM;
-			goto free_list;
-		}
-
-		if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
-		    len > PAGE_SIZE || len & 3 || addr & 3) {
-			ret = -EINVAL;
-			goto free_pkt;
-		}
-
-		if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
-			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
-					     &dma_addr);
-		else
-			pbc = NULL;
-
-		if (!pbc) {
-			page = alloc_page(GFP_KERNEL);
-			if (!page) {
-				ret = -ENOMEM;
-				goto free_pkt;
-			}
-			pbc = kmap(page);
-		}
-
-		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
-		if (cfur) {
-			ret = -EFAULT;
-			goto free_pbc;
-		}
-
-		/*
-		 * this assignment is a bit strange.  it's because the
-		 * pbc counts the number of 32 bit words in the full
-		 * packet _except_ the first word of the pbc itself...
-		 */
-		pktnwc = nw - 1;
-
-		/*
-		 * pktnw computation yields the number of 32 bit words
-		 * that the caller has indicated in the PBC.  note that
-		 * this is one less than the total number of words that
-		 * goes to the send DMA engine as the first 32 bit word
-		 * of the PBC itself is not counted.  Armed with this count,
-		 * we can verify that the packet is consistent with the
-		 * iovec lengths.
-		 */
-		pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
-		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
-			ret = -EINVAL;
-			goto free_pbc;
-		}
-
-
-		idx++;
-		while (pktnwc < pktnw && idx < niov) {
-			const size_t slen = iov[idx].iov_len;
-			const unsigned long faddr =
-				(unsigned long) iov[idx].iov_base;
-
-			if (slen & 3 || faddr & 3 || !slen ||
-			    slen > PAGE_SIZE) {
-				ret = -EINVAL;
-				goto free_pbc;
-			}
-
-			npages++;
-			if ((faddr & PAGE_MASK) !=
-			    ((faddr + slen - 1) & PAGE_MASK))
-				npages++;
-
-			pktnwc += slen >> 2;
-			idx++;
-			nfrags++;
-		}
-
-		if (pktnwc != pktnw) {
-			ret = -EINVAL;
-			goto free_pbc;
-		}
-
-		if (page) {
-			dma_addr = dma_map_page(&dd->pcidev->dev,
-						page, 0, len, DMA_TO_DEVICE);
-			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-				ret = -ENOMEM;
-				goto free_pbc;
-			}
-
-			dma_mapped = 1;
-		}
-
-		ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
-					    page, pbc, dma_addr);
-
-		if (nfrags) {
-			ret = ipath_user_sdma_init_payload(dd, pq, pkt,
-							   iov + idx_save + 1,
-							   nfrags, npages);
-			if (ret < 0)
-				goto free_pbc_dma;
-		}
-
-		counter++;
-		npkts++;
-
-		list_add_tail(&pkt->list, list);
-	}
-
-	ret = idx;
-	goto done;
-
-free_pbc_dma:
-	if (dma_mapped)
-		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
-free_pbc:
-	if (page) {
-		kunmap(page);
-		__free_page(page);
-	} else
-		dma_pool_free(pq->header_cache, pbc, dma_addr);
-free_pkt:
-	kmem_cache_free(pq->pkt_slab, pkt);
-free_list:
-	ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
-done:
-	return ret;
-}
-
-static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
-						 u32 c)
-{
-	pq->sent_counter = c;
-}
-
-/* try to clean out queue -- needs pq->lock */
-static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
-				       struct ipath_user_sdma_queue *pq)
-{
-	struct list_head free_list;
-	struct ipath_user_sdma_pkt *pkt;
-	struct ipath_user_sdma_pkt *pkt_prev;
-	int ret = 0;
-
-	INIT_LIST_HEAD(&free_list);
-
-	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
-		s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
-
-		if (descd < 0)
-			break;
-
-		list_move_tail(&pkt->list, &free_list);
-
-		/* one more packet cleaned */
-		ret++;
-	}
-
-	if (!list_empty(&free_list)) {
-		u32 counter;
-
-		pkt = list_entry(free_list.prev,
-				 struct ipath_user_sdma_pkt, list);
-		counter = pkt->counter;
-
-		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
-		ipath_user_sdma_set_complete_counter(pq, counter);
-	}
-
-	return ret;
-}
-
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
-{
-	if (!pq)
-		return;
-
-	kmem_cache_destroy(pq->pkt_slab);
-	dma_pool_destroy(pq->header_cache);
-	kfree(pq);
-}
-
-/* clean descriptor queue, returns > 0 if some elements cleaned */
-static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-	ret = ipath_sdma_make_progress(dd);
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-	return ret;
-}
-
-/* we're in close, drain packets so that we can clean up successfully... */
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
-				 struct ipath_user_sdma_queue *pq)
-{
-	int i;
-
-	if (!pq)
-		return;
-
-	for (i = 0; i < 100; i++) {
-		mutex_lock(&pq->lock);
-		if (list_empty(&pq->sent)) {
-			mutex_unlock(&pq->lock);
-			break;
-		}
-		ipath_user_sdma_hwqueue_clean(dd);
-		ipath_user_sdma_queue_clean(dd, pq);
-		mutex_unlock(&pq->lock);
-		msleep(10);
-	}
-
-	if (!list_empty(&pq->sent)) {
-		struct list_head free_list;
-
-		printk(KERN_INFO "drain: lists not empty: forcing!\n");
-		INIT_LIST_HEAD(&free_list);
-		mutex_lock(&pq->lock);
-		list_splice_init(&pq->sent, &free_list);
-		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
-		mutex_unlock(&pq->lock);
-	}
-}
-
-static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
-					   u64 addr, u64 dwlen, u64 dwoffset)
-{
-	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
-			   ((addr & 0xfffffffcULL) << 32) |
-			   /* SDmaGeneration[1:0] */
-			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
-			   /* SDmaDwordCount[10:0] */
-			   ((dwlen & 0x7ffULL) << 16) |
-			   /* SDmaBufOffset[12:2] */
-			   (dwoffset & 0x7ffULL));
-}
-
-static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
-{
-	return descq | cpu_to_le64(1ULL << 12);
-}
-
-static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
-{
-					      /* last */  /* dma head */
-	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
-}
-
-static inline __le64 ipath_sdma_make_desc1(u64 addr)
-{
-	/* SDmaPhyAddr[47:32] */
-	return cpu_to_le64(addr >> 32);
-}
-
-static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
-				      struct ipath_user_sdma_pkt *pkt, int idx,
-				      unsigned ofs, u16 tail)
-{
-	const u64 addr = (u64) pkt->addr[idx].addr +
-		(u64) pkt->addr[idx].offset;
-	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
-	__le64 *descqp;
-	__le64 descq0;
-
-	descqp = &dd->ipath_sdma_descq[tail].qw[0];
-
-	descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
-	if (idx == 0)
-		descq0 = ipath_sdma_make_first_desc0(descq0);
-	if (idx == pkt->naddr - 1)
-		descq0 = ipath_sdma_make_last_desc0(descq0);
-
-	descqp[0] = descq0;
-	descqp[1] = ipath_sdma_make_desc1(addr);
-}
-
-/* pq->lock must be held, get packets on the wire... */
-static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
-				     struct ipath_user_sdma_queue *pq,
-				     struct list_head *pktlist)
-{
-	int ret = 0;
-	unsigned long flags;
-	u16 tail;
-
-	if (list_empty(pktlist))
-		return 0;
-
-	if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
-		return -ECOMM;
-
-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-	if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
-		ret = -ECOMM;
-		goto unlock;
-	}
-
-	tail = dd->ipath_sdma_descq_tail;
-	while (!list_empty(pktlist)) {
-		struct ipath_user_sdma_pkt *pkt =
-			list_entry(pktlist->next, struct ipath_user_sdma_pkt,
-				   list);
-		int i;
-		unsigned ofs = 0;
-		u16 dtail = tail;
-
-		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
-			goto unlock_check_tail;
-
-		for (i = 0; i < pkt->naddr; i++) {
-			ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
-			ofs += pkt->addr[i].length >> 2;
-
-			if (++tail == dd->ipath_sdma_descq_cnt) {
-				tail = 0;
-				++dd->ipath_sdma_generation;
-			}
-		}
-
-		if ((ofs<<2) > dd->ipath_ibmaxlen) {
-			ipath_dbg("packet size %X > ibmax %X, fail\n",
-				ofs<<2, dd->ipath_ibmaxlen);
-			ret = -EMSGSIZE;
-			goto unlock;
-		}
-
-		/*
-		 * if the packet is >= 2KB mtu equivalent, we have to use
-		 * the large buffers, and have to mark each descriptor as
-		 * part of a large buffer packet.
-		 */
-		if (ofs >= IPATH_SMALLBUF_DWORDS) {
-			for (i = 0; i < pkt->naddr; i++) {
-				dd->ipath_sdma_descq[dtail].qw[0] |=
-					cpu_to_le64(1ULL << 14);
-				if (++dtail == dd->ipath_sdma_descq_cnt)
-					dtail = 0;
-			}
-		}
-
-		dd->ipath_sdma_descq_added += pkt->naddr;
-		pkt->added = dd->ipath_sdma_descq_added;
-		list_move_tail(&pkt->list, &pq->sent);
-		ret++;
-	}
-
-unlock_check_tail:
-	/* advance the tail on the chip if necessary */
-	if (dd->ipath_sdma_descq_tail != tail) {
-		wmb();
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
-		dd->ipath_sdma_descq_tail = tail;
-	}
-
-unlock:
-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-	return ret;
-}
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
-			   struct ipath_user_sdma_queue *pq,
-			   const struct iovec *iov,
-			   unsigned long dim)
-{
-	int ret = 0;
-	struct list_head list;
-	int npkts = 0;
-
-	INIT_LIST_HEAD(&list);
-
-	mutex_lock(&pq->lock);
-
-	if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
-		ipath_user_sdma_hwqueue_clean(dd);
-		ipath_user_sdma_queue_clean(dd, pq);
-	}
-
-	while (dim) {
-		const int mxp = 8;
-
-		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
-		if (ret <= 0)
-			goto done_unlock;
-		else {
-			dim -= ret;
-			iov += ret;
-		}
-
-		/* force packets onto the sdma hw queue... */
-		if (!list_empty(&list)) {
-			/*
-			 * lazily clean hw queue.  the 4 is a guess of about
-			 * how many sdma descriptors a packet will take (it
-			 * doesn't have to be perfect).
-			 */
-			if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
-				ipath_user_sdma_hwqueue_clean(dd);
-				ipath_user_sdma_queue_clean(dd, pq);
-			}
-
-			ret = ipath_user_sdma_push_pkts(dd, pq, &list);
-			if (ret < 0)
-				goto done_unlock;
-			else {
-				npkts += ret;
-				pq->counter += ret;
-
-				if (!list_empty(&list))
-					goto done_unlock;
-			}
-		}
-	}
-
-done_unlock:
-	if (!list_empty(&list))
-		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
-	mutex_unlock(&pq->lock);
-
-	return (ret < 0) ? ret : npkts;
-}
-
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
-				  struct ipath_user_sdma_queue *pq)
-{
-	int ret = 0;
-
-	mutex_lock(&pq->lock);
-	ipath_user_sdma_hwqueue_clean(dd);
-	ret = ipath_user_sdma_queue_clean(dd, pq);
-	mutex_unlock(&pq->lock);
-
-	return ret;
-}
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
-{
-	return pq->sent_counter;
-}
-
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
-{
-	return pq->counter;
-}
-
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.h b/drivers/staging/rdma/ipath/ipath_user_sdma.h
deleted file mode 100644
index fc76316..0000000
--- a/drivers/staging/rdma/ipath/ipath_user_sdma.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-
-struct ipath_user_sdma_queue;
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
-			   struct ipath_user_sdma_queue *pq,
-			   const struct iovec *iov,
-			   unsigned long dim);
-
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
-				  struct ipath_user_sdma_queue *pq);
-
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
-				 struct ipath_user_sdma_queue *pq);
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
deleted file mode 100644
index 53f9dca..0000000
--- a/drivers/staging/rdma/ipath/ipath_verbs.c
+++ /dev/null
@@ -1,2376 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-static unsigned int ib_ipath_qp_table_size = 251;
-module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-unsigned int ib_ipath_lkey_table_size = 12;
-module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
-		   S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
-		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int ib_ipath_max_pds = 0xFFFF;
-module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_pds,
-		 "Maximum number of protection domains to support");
-
-static unsigned int ib_ipath_max_ahs = 0xFFFF;
-module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int ib_ipath_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
-		 "Maximum number of completion queue entries to support");
-
-unsigned int ib_ipath_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
-		   S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int ib_ipath_max_qps = 16384;
-module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int ib_ipath_max_sges = 0x60;
-module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int ib_ipath_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
-		   S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
-		 "Maximum number of multicast groups to support");
-
-unsigned int ib_ipath_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
-		   uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
-		 "Maximum number of attached QPs to support");
-
-unsigned int ib_ipath_max_srqs = 1024;
-module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int ib_ipath_max_srq_sges = 128;
-module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
-		   uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
-		   uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
-
-static unsigned int ib_ipath_disable_sma;
-module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(disable_sma, "Disable the SMA");
-
-/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; ipath_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
- */
-const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
-	[IB_QPS_RESET] = 0,
-	[IB_QPS_INIT] = IPATH_POST_RECV_OK,
-	[IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
-	[IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
-	    IPATH_PROCESS_NEXT_SEND_OK,
-	[IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
-	[IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-	    IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
-	[IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
-	    IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
-};
-
-struct ipath_ucontext {
-	struct ib_ucontext ibucontext;
-};
-
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
-						  *ibucontext)
-{
-	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * System image GUID.
- */
-static __be64 sys_image_guid;
-
-/**
- * ipath_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
-{
-	struct ipath_sge *sge = &ss->sge;
-
-	while (length) {
-		u32 len = sge->length;
-
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		memcpy(sge->vaddr, data, len);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (--ss->num_sge)
-				*sge = *ss->sg_list++;
-		} else if (sge->length == 0 && sge->mr != NULL) {
-			if (++sge->n >= IPATH_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		data += len;
-		length -= len;
-	}
-}
-
-/**
- * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
- * @ss: the SGE state
- * @length: the number of bytes to skip
- */
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
-{
-	struct ipath_sge *sge = &ss->sge;
-
-	while (length) {
-		u32 len = sge->length;
-
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (--ss->num_sge)
-				*sge = *ss->sg_list++;
-		} else if (sge->length == 0 && sge->mr != NULL) {
-			if (++sge->n >= IPATH_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		length -= len;
-	}
-}
-
-/*
- * Count the number of DMA descriptors needed to send length bytes of data.
- * Don't modify the ipath_sge_state to get the count.
- * Return zero if any of the segments is not aligned.
- */
-static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
-{
-	struct ipath_sge *sg_list = ss->sg_list;
-	struct ipath_sge sge = ss->sge;
-	u8 num_sge = ss->num_sge;
-	u32 ndesc = 1;	/* count the header */
-
-	while (length) {
-		u32 len = sge.length;
-
-		if (len > length)
-			len = length;
-		if (len > sge.sge_length)
-			len = sge.sge_length;
-		BUG_ON(len == 0);
-		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
-		    (len != length && (len & (sizeof(u32) - 1)))) {
-			ndesc = 0;
-			break;
-		}
-		ndesc++;
-		sge.vaddr += len;
-		sge.length -= len;
-		sge.sge_length -= len;
-		if (sge.sge_length == 0) {
-			if (--num_sge)
-				sge = *sg_list++;
-		} else if (sge.length == 0 && sge.mr != NULL) {
-			if (++sge.n >= IPATH_SEGSZ) {
-				if (++sge.m >= sge.mr->mapsz)
-					break;
-				sge.n = 0;
-			}
-			sge.vaddr =
-				sge.mr->map[sge.m]->segs[sge.n].vaddr;
-			sge.length =
-				sge.mr->map[sge.m]->segs[sge.n].length;
-		}
-		length -= len;
-	}
-	return ndesc;
-}
-
-/*
- * Copy from the SGEs to the data buffer.
- */
-static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
-				u32 length)
-{
-	struct ipath_sge *sge = &ss->sge;
-
-	while (length) {
-		u32 len = sge->length;
-
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		memcpy(data, sge->vaddr, len);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (--ss->num_sge)
-				*sge = *ss->sg_list++;
-		} else if (sge->length == 0 && sge->mr != NULL) {
-			if (++sge->n >= IPATH_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		data += len;
-		length -= len;
-	}
-}
-
-/**
- * ipath_post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
-{
-	struct ipath_swqe *wqe;
-	u32 next;
-	int i;
-	int j;
-	int acc;
-	int ret;
-	unsigned long flags;
-	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	if (qp->ibqp.qp_type != IB_QPT_SMI &&
-	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-		ret = -ENETDOWN;
-		goto bail;
-	}
-
-	/* Check that state is OK to post send. */
-	if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
-		goto bail_inval;
-
-	/* IB spec says that num_sge == 0 is OK. */
-	if (wr->num_sge > qp->s_max_sge)
-		goto bail_inval;
-
-	/*
-	 * Don't allow RDMA reads or atomic operations on UC QPs,
-	 * or undefined operations on any QP type.
-	 * Make sure buffer is large enough to hold the result for atomics.
-	 */
-	if (qp->ibqp.qp_type == IB_QPT_UC) {
-		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
-			goto bail_inval;
-	} else if (qp->ibqp.qp_type == IB_QPT_UD) {
-		/* Check UD opcode */
-		if (wr->opcode != IB_WR_SEND &&
-		    wr->opcode != IB_WR_SEND_WITH_IMM)
-			goto bail_inval;
-		/* Check UD destination address PD */
-		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
-			goto bail_inval;
-	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
-		goto bail_inval;
-	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
-		   (wr->num_sge == 0 ||
-		    wr->sg_list[0].length < sizeof(u64) ||
-		    wr->sg_list[0].addr & (sizeof(u64) - 1)))
-		goto bail_inval;
-	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
-		goto bail_inval;
-
-	next = qp->s_head + 1;
-	if (next >= qp->s_size)
-		next = 0;
-	if (next == qp->s_last) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	wqe = get_swqe_ptr(qp, qp->s_head);
-
-	if (qp->ibqp.qp_type != IB_QPT_UC &&
-	    qp->ibqp.qp_type != IB_QPT_RC)
-		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
-	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
-		 wr->opcode == IB_WR_RDMA_WRITE ||
-		 wr->opcode == IB_WR_RDMA_READ)
-		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
-	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
-	else
-		memcpy(&wqe->wr, wr, sizeof(wqe->wr));
-
-	wqe->length = 0;
-	if (wr->num_sge) {
-		acc = wr->opcode >= IB_WR_RDMA_READ ?
-			IB_ACCESS_LOCAL_WRITE : 0;
-		for (i = 0, j = 0; i < wr->num_sge; i++) {
-			u32 length = wr->sg_list[i].length;
-			int ok;
-
-			if (length == 0)
-				continue;
-			ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
-					   &wr->sg_list[i], acc);
-			if (!ok)
-				goto bail_inval;
-			wqe->length += length;
-			j++;
-		}
-		wqe->wr.num_sge = j;
-	}
-	if (qp->ibqp.qp_type == IB_QPT_UC ||
-	    qp->ibqp.qp_type == IB_QPT_RC) {
-		if (wqe->length > 0x80000000U)
-			goto bail_inval;
-	} else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
-		goto bail_inval;
-	wqe->ssn = qp->s_ssn++;
-	qp->s_head = next;
-
-	ret = 0;
-	goto bail;
-
-bail_inval:
-	ret = -EINVAL;
-bail:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	return ret;
-}
-
-/**
- * ipath_post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			   struct ib_send_wr **bad_wr)
-{
-	struct ipath_qp *qp = to_iqp(ibqp);
-	int err = 0;
-
-	for (; wr; wr = wr->next) {
-		err = ipath_post_one_send(qp, wr);
-		if (err) {
-			*bad_wr = wr;
-			goto bail;
-		}
-	}
-
-	/* Try to do the send work in the caller's context. */
-	ipath_do_send((unsigned long) qp);
-
-bail:
-	return err;
-}
-
-/**
- * ipath_post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			      struct ib_recv_wr **bad_wr)
-{
-	struct ipath_qp *qp = to_iqp(ibqp);
-	struct ipath_rwq *wq = qp->r_rq.wq;
-	unsigned long flags;
-	int ret;
-
-	/* Check that state is OK to post receive. */
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
-		*bad_wr = wr;
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	for (; wr; wr = wr->next) {
-		struct ipath_rwqe *wqe;
-		u32 next;
-		int i;
-
-		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
-			*bad_wr = wr;
-			ret = -EINVAL;
-			goto bail;
-		}
-
-		spin_lock_irqsave(&qp->r_rq.lock, flags);
-		next = wq->head + 1;
-		if (next >= qp->r_rq.size)
-			next = 0;
-		if (next == wq->tail) {
-			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-			*bad_wr = wr;
-			ret = -ENOMEM;
-			goto bail;
-		}
-
-		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
-		wqe->wr_id = wr->wr_id;
-		wqe->num_sge = wr->num_sge;
-		for (i = 0; i < wr->num_sge; i++)
-			wqe->sg_list[i] = wr->sg_list[i];
-		/* Make sure queue entry is written before the head index. */
-		smp_wmb();
-		wq->head = next;
-		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-	}
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_qp_rcv - processing an incoming packet on a QP
- * @dev: the device the packet came on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_ib_rcv() to process an incoming packet
- * for the given QP.
- * Called at interrupt level.
- */
-static void ipath_qp_rcv(struct ipath_ibdev *dev,
-			 struct ipath_ib_header *hdr, int has_grh,
-			 void *data, u32 tlen, struct ipath_qp *qp)
-{
-	/* Check for valid receive state. */
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-		dev->n_pkt_drops++;
-		return;
-	}
-
-	switch (qp->ibqp.qp_type) {
-	case IB_QPT_SMI:
-	case IB_QPT_GSI:
-		if (ib_ipath_disable_sma)
-			break;
-		/* FALLTHROUGH */
-	case IB_QPT_UD:
-		ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
-		break;
-
-	case IB_QPT_RC:
-		ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
-		break;
-
-	case IB_QPT_UC:
-		ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
-		break;
-
-	default:
-		break;
-	}
-}
-
-/**
- * ipath_ib_rcv - process an incoming packet
- * @dev: the device pointer
- * @rhdr: the header of the packet
- * @data: the packet data
- * @tlen: the packet length
- *
- * This is called from ipath_kreceive() to process an incoming packet at
- * interrupt level. Tlen is the length of the header + data + CRC in bytes.
- */
-void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
-		  u32 tlen)
-{
-	struct ipath_ib_header *hdr = rhdr;
-	struct ipath_other_headers *ohdr;
-	struct ipath_qp *qp;
-	u32 qp_num;
-	int lnh;
-	u8 opcode;
-	u16 lid;
-
-	if (unlikely(dev == NULL))
-		goto bail;
-
-	if (unlikely(tlen < 24)) {	/* LRH+BTH+CRC */
-		dev->rcv_errors++;
-		goto bail;
-	}
-
-	/* Check for a valid destination LID (see ch. 7.11.1). */
-	lid = be16_to_cpu(hdr->lrh[1]);
-	if (lid < IPATH_MULTICAST_LID_BASE) {
-		lid &= ~((1 << dev->dd->ipath_lmc) - 1);
-		if (unlikely(lid != dev->dd->ipath_lid)) {
-			dev->rcv_errors++;
-			goto bail;
-		}
-	}
-
-	/* Check for GRH */
-	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
-	if (lnh == IPATH_LRH_BTH)
-		ohdr = &hdr->u.oth;
-	else if (lnh == IPATH_LRH_GRH)
-		ohdr = &hdr->u.l.oth;
-	else {
-		dev->rcv_errors++;
-		goto bail;
-	}
-
-	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
-	dev->opstats[opcode].n_bytes += tlen;
-	dev->opstats[opcode].n_packets++;
-
-	/* Get the destination QP number. */
-	qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
-	if (qp_num == IPATH_MULTICAST_QPN) {
-		struct ipath_mcast *mcast;
-		struct ipath_mcast_qp *p;
-
-		if (lnh != IPATH_LRH_GRH) {
-			dev->n_pkt_drops++;
-			goto bail;
-		}
-		mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
-		if (mcast == NULL) {
-			dev->n_pkt_drops++;
-			goto bail;
-		}
-		dev->n_multicast_rcv++;
-		list_for_each_entry_rcu(p, &mcast->qp_list, list)
-			ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
-		/*
-		 * Notify ipath_multicast_detach() if it is waiting for us
-		 * to finish.
-		 */
-		if (atomic_dec_return(&mcast->refcount) <= 1)
-			wake_up(&mcast->wait);
-	} else {
-		qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
-		if (qp) {
-			dev->n_unicast_rcv++;
-			ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
-				     tlen, qp);
-			/*
-			 * Notify ipath_destroy_qp() if it is waiting
-			 * for us to finish.
-			 */
-			if (atomic_dec_and_test(&qp->refcount))
-				wake_up(&qp->wait);
-		} else
-			dev->n_pkt_drops++;
-	}
-
-bail:;
-}
-
-/**
- * ipath_ib_timer - verbs timer
- * @dev: the device pointer
- *
- * This is called from ipath_do_rcv_timer() at interrupt level to check for
- * QPs which need retransmits and to collect performance numbers.
- */
-static void ipath_ib_timer(struct ipath_ibdev *dev)
-{
-	struct ipath_qp *resend = NULL;
-	struct ipath_qp *rnr = NULL;
-	struct list_head *last;
-	struct ipath_qp *qp;
-	unsigned long flags;
-
-	if (dev == NULL)
-		return;
-
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	/* Start filling the next pending queue. */
-	if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
-		dev->pending_index = 0;
-	/* Save any requests still in the new queue, they have timed out. */
-	last = &dev->pending[dev->pending_index];
-	while (!list_empty(last)) {
-		qp = list_entry(last->next, struct ipath_qp, timerwait);
-		list_del_init(&qp->timerwait);
-		qp->timer_next = resend;
-		resend = qp;
-		atomic_inc(&qp->refcount);
-	}
-	last = &dev->rnrwait;
-	if (!list_empty(last)) {
-		qp = list_entry(last->next, struct ipath_qp, timerwait);
-		if (--qp->s_rnr_timeout == 0) {
-			do {
-				list_del_init(&qp->timerwait);
-				qp->timer_next = rnr;
-				rnr = qp;
-				atomic_inc(&qp->refcount);
-				if (list_empty(last))
-					break;
-				qp = list_entry(last->next, struct ipath_qp,
-						timerwait);
-			} while (qp->s_rnr_timeout == 0);
-		}
-	}
-	/*
-	 * We should only be in the started state if pma_sample_start != 0
-	 */
-	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
-	    --dev->pma_sample_start == 0) {
-		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
-		ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
-					&dev->ipath_rword,
-					&dev->ipath_spkts,
-					&dev->ipath_rpkts,
-					&dev->ipath_xmit_wait);
-	}
-	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
-		if (dev->pma_sample_interval == 0) {
-			u64 ta, tb, tc, td, te;
-
-			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
-			ipath_snapshot_counters(dev->dd, &ta, &tb,
-						&tc, &td, &te);
-
-			dev->ipath_sword = ta - dev->ipath_sword;
-			dev->ipath_rword = tb - dev->ipath_rword;
-			dev->ipath_spkts = tc - dev->ipath_spkts;
-			dev->ipath_rpkts = td - dev->ipath_rpkts;
-			dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
-		} else {
-			dev->pma_sample_interval--;
-		}
-	}
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-	/* XXX What if timer fires again while this is running? */
-	while (resend != NULL) {
-		qp = resend;
-		resend = qp->timer_next;
-
-		spin_lock_irqsave(&qp->s_lock, flags);
-		if (qp->s_last != qp->s_tail &&
-		    ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
-			dev->n_timeouts++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1);
-		}
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-
-		/* Notify ipath_destroy_qp() if it is waiting. */
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
-	}
-	while (rnr != NULL) {
-		qp = rnr;
-		rnr = qp->timer_next;
-
-		spin_lock_irqsave(&qp->s_lock, flags);
-		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
-			ipath_schedule_send(qp);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-
-		/* Notify ipath_destroy_qp() if it is waiting. */
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
-	}
-}
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
-	struct ipath_sge *sge = &ss->sge;
-
-	sge->vaddr += length;
-	sge->length -= length;
-	sge->sge_length -= length;
-	if (sge->sge_length == 0) {
-		if (--ss->num_sge)
-			*sge = *ss->sg_list++;
-	} else if (sge->length == 0 && sge->mr != NULL) {
-		if (++sge->n >= IPATH_SEGSZ) {
-			if (++sge->m >= sge->mr->mapsz)
-				return;
-			sge->n = 0;
-		}
-		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
-		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
-	}
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#endif
-
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
-		    u32 length, unsigned flush_wc)
-{
-	u32 extra = 0;
-	u32 data = 0;
-	u32 last;
-
-	while (1) {
-		u32 len = ss->sge.length;
-		u32 off;
-
-		if (len > length)
-			len = length;
-		if (len > ss->sge.sge_length)
-			len = ss->sge.sge_length;
-		BUG_ON(len == 0);
-		/* If the source address is not aligned, try to align it. */
-		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
-		if (off) {
-			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
-					    ~(sizeof(u32) - 1));
-			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
-			u32 y;
-
-			y = sizeof(u32) - off;
-			if (len > y)
-				len = y;
-			if (len + extra >= sizeof(u32)) {
-				data |= set_upper_bits(v, extra *
-						       BITS_PER_BYTE);
-				len = sizeof(u32) - extra;
-				if (len == length) {
-					last = data;
-					break;
-				}
-				__raw_writel(data, piobuf);
-				piobuf++;
-				extra = 0;
-				data = 0;
-			} else {
-				/* Clear unused upper bytes */
-				data |= clear_upper_bytes(v, len, extra);
-				if (len == length) {
-					last = data;
-					break;
-				}
-				extra += len;
-			}
-		} else if (extra) {
-			/* Source address is aligned. */
-			u32 *addr = (u32 *) ss->sge.vaddr;
-			int shift = extra * BITS_PER_BYTE;
-			int ushift = 32 - shift;
-			u32 l = len;
-
-			while (l >= sizeof(u32)) {
-				u32 v = *addr;
-
-				data |= set_upper_bits(v, shift);
-				__raw_writel(data, piobuf);
-				data = get_upper_bits(v, ushift);
-				piobuf++;
-				addr++;
-				l -= sizeof(u32);
-			}
-			/*
-			 * We still have 'extra' number of bytes leftover.
-			 */
-			if (l) {
-				u32 v = *addr;
-
-				if (l + extra >= sizeof(u32)) {
-					data |= set_upper_bits(v, shift);
-					len -= l + extra - sizeof(u32);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					__raw_writel(data, piobuf);
-					piobuf++;
-					extra = 0;
-					data = 0;
-				} else {
-					/* Clear unused upper bytes */
-					data |= clear_upper_bytes(v, l,
-								  extra);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					extra += l;
-				}
-			} else if (len == length) {
-				last = data;
-				break;
-			}
-		} else if (len == length) {
-			u32 w;
-
-			/*
-			 * Need to round up for the last dword in the
-			 * packet.
-			 */
-			w = (len + 3) >> 2;
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
-			piobuf += w - 1;
-			last = ((u32 *) ss->sge.vaddr)[w - 1];
-			break;
-		} else {
-			u32 w = len >> 2;
-
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
-			piobuf += w;
-
-			extra = len & (sizeof(u32) - 1);
-			if (extra) {
-				u32 v = ((u32 *) ss->sge.vaddr)[w];
-
-				/* Clear unused upper bytes */
-				data = clear_upper_bytes(v, extra, 0);
-			}
-		}
-		update_sge(ss, len);
-		length -= len;
-	}
-	/* Update address before sending packet. */
-	update_sge(ss, length);
-	if (flush_wc) {
-		/* must flush early everything before trigger word */
-		ipath_flush_wc();
-		__raw_writel(last, piobuf);
-		/* be sure trigger word is written */
-		ipath_flush_wc();
-	} else
-		__raw_writel(last, piobuf);
-}
-
-/*
- * Convert IB rate to delay multiplier.
- */
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
-{
-	switch (rate) {
-	case IB_RATE_2_5_GBPS: return 8;
-	case IB_RATE_5_GBPS:   return 4;
-	case IB_RATE_10_GBPS:  return 2;
-	case IB_RATE_20_GBPS:  return 1;
-	default:	       return 0;
-	}
-}
-
-/*
- * Convert delay multiplier to IB rate
- */
-static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
-{
-	switch (mult) {
-	case 8:  return IB_RATE_2_5_GBPS;
-	case 4:  return IB_RATE_5_GBPS;
-	case 2:  return IB_RATE_10_GBPS;
-	case 1:  return IB_RATE_20_GBPS;
-	default: return IB_RATE_PORT_CURRENT;
-	}
-}
-
-static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
-{
-	struct ipath_verbs_txreq *tx = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (!list_empty(&dev->txreq_free)) {
-		struct list_head *l = dev->txreq_free.next;
-
-		list_del(l);
-		tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
-	}
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
-	return tx;
-}
-
-static inline void put_txreq(struct ipath_ibdev *dev,
-			     struct ipath_verbs_txreq *tx)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	list_add(&tx->txreq.list, &dev->txreq_free);
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
-}
-
-static void sdma_complete(void *cookie, int status)
-{
-	struct ipath_verbs_txreq *tx = cookie;
-	struct ipath_qp *qp = tx->qp;
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	unsigned long flags;
-	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
-		IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
-
-	if (atomic_dec_and_test(&qp->s_dma_busy)) {
-		spin_lock_irqsave(&qp->s_lock, flags);
-		if (tx->wqe)
-			ipath_send_complete(qp, tx->wqe, ibs);
-		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
-		     qp->s_last != qp->s_head) ||
-		    (qp->s_flags & IPATH_S_WAIT_DMA))
-			ipath_schedule_send(qp);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-		wake_up(&qp->wait_dma);
-	} else if (tx->wqe) {
-		spin_lock_irqsave(&qp->s_lock, flags);
-		ipath_send_complete(qp, tx->wqe, ibs);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-	}
-
-	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
-		kfree(tx->txreq.map_addr);
-	put_txreq(dev, tx);
-
-	if (atomic_dec_and_test(&qp->refcount))
-		wake_up(&qp->wait);
-}
-
-static void decrement_dma_busy(struct ipath_qp *qp)
-{
-	unsigned long flags;
-
-	if (atomic_dec_and_test(&qp->s_dma_busy)) {
-		spin_lock_irqsave(&qp->s_lock, flags);
-		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
-		     qp->s_last != qp->s_head) ||
-		    (qp->s_flags & IPATH_S_WAIT_DMA))
-			ipath_schedule_send(qp);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-		wake_up(&qp->wait_dma);
-	}
-}
-
-/*
- * Compute the number of clock cycles of delay before sending the next packet.
- * The multipliers reflect the number of clocks for the fastest rate so
- * one tick at 4xDDR is 8 ticks at 1xSDR.
- * If the destination port will take longer to receive a packet than
- * the outgoing link can send it, we need to delay sending the next packet
- * by the difference in time it takes the receiver to receive and the sender
- * to send this packet.
- * Note that this delay is always correct for UC and RC but not always
- * optimal for UD. For UD, the destination HCA can be different for each
- * packet, in which case, we could send packets to a different destination
- * while "waiting" for the delay. The overhead for doing this without
- * HW support is more than just paying the cost of delaying some packets
- * unnecessarily.
- */
-static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
-{
-	return (rcv_mult > snd_mult) ?
-		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
-}
-
-static int ipath_verbs_send_dma(struct ipath_qp *qp,
-				struct ipath_ib_header *hdr, u32 hdrwords,
-				struct ipath_sge_state *ss, u32 len,
-				u32 plen, u32 dwords)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_devdata *dd = dev->dd;
-	struct ipath_verbs_txreq *tx;
-	u32 *piobuf;
-	u32 control;
-	u32 ndesc;
-	int ret;
-
-	tx = qp->s_tx;
-	if (tx) {
-		qp->s_tx = NULL;
-		/* resend previously constructed packet */
-		atomic_inc(&qp->s_dma_busy);
-		ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
-		if (ret) {
-			qp->s_tx = tx;
-			decrement_dma_busy(qp);
-		}
-		goto bail;
-	}
-
-	tx = get_txreq(dev);
-	if (!tx) {
-		ret = -EBUSY;
-		goto bail;
-	}
-
-	/*
-	 * Get the saved delay count we computed for the previous packet
-	 * and save the delay count for this packet to be used next time
-	 * we get here.
-	 */
-	control = qp->s_pkt_delay;
-	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
-	tx->qp = qp;
-	atomic_inc(&qp->refcount);
-	tx->wqe = qp->s_wqe;
-	tx->txreq.callback = sdma_complete;
-	tx->txreq.callback_cookie = tx;
-	tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
-		IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
-	if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
-		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
-
-	/* VL15 packets bypass credit check */
-	if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
-		control |= 1ULL << 31;
-		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
-	}
-
-	if (len) {
-		/*
-		 * Don't try to DMA if it takes more descriptors than
-		 * the queue holds.
-		 */
-		ndesc = ipath_count_sge(ss, len);
-		if (ndesc >= dd->ipath_sdma_descq_cnt)
-			ndesc = 0;
-	} else
-		ndesc = 1;
-	if (ndesc) {
-		tx->hdr.pbc[0] = cpu_to_le32(plen);
-		tx->hdr.pbc[1] = cpu_to_le32(control);
-		memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
-		tx->txreq.sg_count = ndesc;
-		tx->map_len = (hdrwords + 2) << 2;
-		tx->txreq.map_addr = &tx->hdr;
-		atomic_inc(&qp->s_dma_busy);
-		ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
-		if (ret) {
-			/* save ss and length in dwords */
-			tx->ss = ss;
-			tx->len = dwords;
-			qp->s_tx = tx;
-			decrement_dma_busy(qp);
-		}
-		goto bail;
-	}
-
-	/* Allocate a buffer and copy the header and payload to it. */
-	tx->map_len = (plen + 1) << 2;
-	piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
-	if (unlikely(piobuf == NULL)) {
-		ret = -EBUSY;
-		goto err_tx;
-	}
-	tx->txreq.map_addr = piobuf;
-	tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
-	tx->txreq.sg_count = 1;
-
-	*piobuf++ = (__force u32) cpu_to_le32(plen);
-	*piobuf++ = (__force u32) cpu_to_le32(control);
-	memcpy(piobuf, hdr, hdrwords << 2);
-	ipath_copy_from_sge(piobuf + hdrwords, ss, len);
-
-	atomic_inc(&qp->s_dma_busy);
-	ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
-	/*
-	 * If we couldn't queue the DMA request, save the info
-	 * and try again later rather than destroying the
-	 * buffer and undoing the side effects of the copy.
-	 */
-	if (ret) {
-		tx->ss = NULL;
-		tx->len = 0;
-		qp->s_tx = tx;
-		decrement_dma_busy(qp);
-	}
-	dev->n_unaligned++;
-	goto bail;
-
-err_tx:
-	if (atomic_dec_and_test(&qp->refcount))
-		wake_up(&qp->wait);
-	put_txreq(dev, tx);
-bail:
-	return ret;
-}
-
-static int ipath_verbs_send_pio(struct ipath_qp *qp,
-				struct ipath_ib_header *ibhdr, u32 hdrwords,
-				struct ipath_sge_state *ss, u32 len,
-				u32 plen, u32 dwords)
-{
-	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-	u32 *hdr = (u32 *) ibhdr;
-	u32 __iomem *piobuf;
-	unsigned flush_wc;
-	u32 control;
-	int ret;
-	unsigned long flags;
-
-	piobuf = ipath_getpiobuf(dd, plen, NULL);
-	if (unlikely(piobuf == NULL)) {
-		ret = -EBUSY;
-		goto bail;
-	}
-
-	/*
-	 * Get the saved delay count we computed for the previous packet
-	 * and save the delay count for this packet to be used next time
-	 * we get here.
-	 */
-	control = qp->s_pkt_delay;
-	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
-	/* VL15 packets bypass credit check */
-	if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
-		control |= 1ULL << 31;
-
-	/*
-	 * Write the length to the control qword plus any needed flags.
-	 * We have to flush after the PBC for correctness on some cpus
-	 * or WC buffer can be written out of order.
-	 */
-	writeq(((u64) control << 32) | plen, piobuf);
-	piobuf += 2;
-
-	flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
-	if (len == 0) {
-		/*
-		 * If there is just the header portion, must flush before
-		 * writing last word of header for correctness, and after
-		 * the last header word (trigger word).
-		 */
-		if (flush_wc) {
-			ipath_flush_wc();
-			__iowrite32_copy(piobuf, hdr, hdrwords - 1);
-			ipath_flush_wc();
-			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
-			ipath_flush_wc();
-		} else
-			__iowrite32_copy(piobuf, hdr, hdrwords);
-		goto done;
-	}
-
-	if (flush_wc)
-		ipath_flush_wc();
-	__iowrite32_copy(piobuf, hdr, hdrwords);
-	piobuf += hdrwords;
-
-	/* The common case is aligned and contained in one segment. */
-	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
-		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
-		u32 *addr = (u32 *) ss->sge.vaddr;
-
-		/* Update address before sending packet. */
-		update_sge(ss, len);
-		if (flush_wc) {
-			__iowrite32_copy(piobuf, addr, dwords - 1);
-			/* must flush early everything before trigger word */
-			ipath_flush_wc();
-			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
-			/* be sure trigger word is written */
-			ipath_flush_wc();
-		} else
-			__iowrite32_copy(piobuf, addr, dwords);
-		goto done;
-	}
-	copy_io(piobuf, ss, len, flush_wc);
-done:
-	if (qp->s_wqe) {
-		spin_lock_irqsave(&qp->s_lock, flags);
-		ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-	}
-	ret = 0;
-bail:
-	return ret;
-}
-
-/**
- * ipath_verbs_send - send a packet
- * @qp: the QP to send on
- * @hdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
- */
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
-		     u32 hdrwords, struct ipath_sge_state *ss, u32 len)
-{
-	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-	u32 plen;
-	int ret;
-	u32 dwords = (len + 3) >> 2;
-
-	/*
-	 * Calculate the send buffer trigger address.
-	 * The +1 counts for the pbc control dword following the pbc length.
-	 */
-	plen = hdrwords + dwords + 1;
-
-	/*
-	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
-	 * can defer SDMA restart until link goes ACTIVE without
-	 * worrying about just how we got there.
-	 */
-	if (qp->ibqp.qp_type == IB_QPT_SMI ||
-	    !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
-		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
-					   plen, dwords);
-	else
-		ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
-					   plen, dwords);
-
-	return ret;
-}
-
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-			    u64 *rwords, u64 *spkts, u64 *rpkts,
-			    u64 *xmit_wait)
-{
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ret = -EINVAL;
-		goto bail;
-	}
-	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_get_counters(struct ipath_devdata *dd,
-		       struct ipath_verbs_counters *cntrs)
-{
-	struct ipath_cregs const *crp = dd->ipath_cregs;
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ret = -EINVAL;
-		goto bail;
-	}
-	cntrs->symbol_error_counter =
-		ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
-	cntrs->link_error_recovery_counter =
-		ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
-	/*
-	 * The link downed counter counts when the other side downs the
-	 * connection.  We add in the number of times we downed the link
-	 * due to local link integrity errors to compensate.
-	 */
-	cntrs->link_downed_counter =
-		ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
-	cntrs->port_rcv_errors =
-		ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
-		ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
-		ipath_snap_cntr(dd, crp->cr_portovflcnt) +
-		ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
-		ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
-		ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
-		ipath_snap_cntr(dd, crp->cr_erricrccnt) +
-		ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
-		ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
-		ipath_snap_cntr(dd, crp->cr_badformatcnt) +
-		dd->ipath_rxfc_unsupvl_errs;
-	if (crp->cr_rxotherlocalphyerrcnt)
-		cntrs->port_rcv_errors +=
-			ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
-	if (crp->cr_rxvlerrcnt)
-		cntrs->port_rcv_errors +=
-			ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
-	cntrs->port_rcv_remphys_errors =
-		ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
-	cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
-	cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
-	cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
-	cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
-	cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
-	cntrs->local_link_integrity_errors =
-		crp->cr_locallinkintegrityerrcnt ?
-		ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
-		((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-		 dd->ipath_lli_errs : dd->ipath_lli_errors);
-	cntrs->excessive_buffer_overrun_errors =
-		crp->cr_excessbufferovflcnt ?
-		ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
-		dd->ipath_overrun_thresh_errs;
-	cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
-		ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_ib_piobufavail - callback when a PIO buffer is available
- * @dev: the device pointer
- *
- * This is called from ipath_intr() at interrupt level when a PIO buffer is
- * available after ipath_verbs_send() returned an error that no buffers were
- * available.  Return 1 if we consumed all the PIO buffers and we still have
- * QPs waiting for buffers (for now, just restart the send tasklet and
- * return zero).
- */
-int ipath_ib_piobufavail(struct ipath_ibdev *dev)
-{
-	struct list_head *list;
-	struct ipath_qp *qplist;
-	struct ipath_qp *qp;
-	unsigned long flags;
-
-	if (dev == NULL)
-		goto bail;
-
-	list = &dev->piowait;
-	qplist = NULL;
-
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	while (!list_empty(list)) {
-		qp = list_entry(list->next, struct ipath_qp, piowait);
-		list_del_init(&qp->piowait);
-		qp->pio_next = qplist;
-		qplist = qp;
-		atomic_inc(&qp->refcount);
-	}
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-	while (qplist != NULL) {
-		qp = qplist;
-		qplist = qp->pio_next;
-
-		spin_lock_irqsave(&qp->s_lock, flags);
-		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
-			ipath_schedule_send(qp);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
-
-		/* Notify ipath_destroy_qp() if it is waiting. */
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
-	}
-
-bail:
-	return 0;
-}
-
-static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-			      struct ib_udata *uhw)
-{
-	struct ipath_ibdev *dev = to_idev(ibdev);
-
-	if (uhw->inlen || uhw->outlen)
-		return -EINVAL;
-
-	memset(props, 0, sizeof(*props));
-
-	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
-		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
-		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
-		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
-	props->page_size_cap = PAGE_SIZE;
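-	/* vendor_id packs the three IEEE OUI bytes into its low 24 bits */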
-	props->vendor_id =
-		IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
-	props->vendor_part_id = dev->dd->ipath_deviceid;
-	props->hw_ver = dev->dd->ipath_pcirev;
-
-	props->sys_image_guid = dev->sys_image_guid;
-
-	props->max_mr_size = ~0ull;
-	props->max_qp = ib_ipath_max_qps;
-	props->max_qp_wr = ib_ipath_max_qp_wrs;
-	props->max_sge = ib_ipath_max_sges;
-	props->max_sge_rd = ib_ipath_max_sges;
-	props->max_cq = ib_ipath_max_cqs;
-	props->max_ah = ib_ipath_max_ahs;
-	props->max_cqe = ib_ipath_max_cqes;
-	props->max_mr = dev->lk_table.max;
-	props->max_fmr = dev->lk_table.max;
-	props->max_map_per_fmr = 32767;
-	props->max_pd = ib_ipath_max_pds;
-	props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
-	props->max_qp_init_rd_atom = 255;
-	/* props->max_res_rd_atom */
-	props->max_srq = ib_ipath_max_srqs;
-	props->max_srq_wr = ib_ipath_max_srq_wrs;
-	props->max_srq_sge = ib_ipath_max_srq_sges;
-	/* props->local_ca_ack_delay */
-	props->atomic_cap = IB_ATOMIC_GLOB;
-	props->max_pkeys = ipath_get_npkeys(dev->dd);
-	props->max_mcast_grp = ib_ipath_max_mcast_grps;
-	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
-	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
-		props->max_mcast_grp;
-
-	return 0;
-}
-
-const u8 ipath_cvt_physportstate[32] = {
-	[INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
-	[INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
-	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
-	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
-	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
-	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
-	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
-		IB_PHYSPORTSTATE_CFG_TRAIN,
-	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
-		IB_PHYSPORTSTATE_CFG_TRAIN,
-	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
-		IB_PHYSPORTSTATE_CFG_TRAIN,
-	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
-		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
-		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
-		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
-{
-	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-static int ipath_query_port(struct ib_device *ibdev,
-			    u8 port, struct ib_port_attr *props)
-{
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_devdata *dd = dev->dd;
-	enum ib_mtu mtu;
-	u16 lid = dd->ipath_lid;
-	u64 ibcstat;
-
-	memset(props, 0, sizeof(*props));
-	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
-	props->lmc = dd->ipath_lmc;
-	props->sm_lid = dev->sm_lid;
-	props->sm_sl = dev->sm_sl;
-	ibcstat = dd->ipath_lastibcstat;
-	/* map LinkState to IB portinfo values.  */
-	props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
-
-	/* See phys_state_show() */
-	props->phys_state = /* MEA: assumes shift == 0 */
-		ipath_cvt_physportstate[dd->ipath_lastibcstat &
-		dd->ibcs_lts_mask];
-	props->port_cap_flags = dev->port_cap_flags;
-	props->gid_tbl_len = 1;
-	props->max_msg_sz = 0x80000000;
-	props->pkey_tbl_len = ipath_get_npkeys(dd);
-	props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
-		dev->z_pkey_violations;
-	props->qkey_viol_cntr = dev->qkey_violations;
-	props->active_width = dd->ipath_link_width_active;
-	/* See rate_show() */
-	props->active_speed = dd->ipath_link_speed_active;
-	props->max_vl_num = 1;		/* VLCap = VL0 */
-	props->init_type_reply = 0;
-
-	props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
-	switch (dd->ipath_ibmtu) {
-	case 4096:
-		mtu = IB_MTU_4096;
-		break;
-	case 2048:
-		mtu = IB_MTU_2048;
-		break;
-	case 1024:
-		mtu = IB_MTU_1024;
-		break;
-	case 512:
-		mtu = IB_MTU_512;
-		break;
-	case 256:
-		mtu = IB_MTU_256;
-		break;
-	default:
-		mtu = IB_MTU_2048;
-	}
-	props->active_mtu = mtu;
-	props->subnet_timeout = dev->subnet_timeout;
-
-	return 0;
-}
-
-static int ipath_modify_device(struct ib_device *device,
-			       int device_modify_mask,
-			       struct ib_device_modify *device_modify)
-{
-	int ret;
-
-	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
-				   IB_DEVICE_MODIFY_NODE_DESC)) {
-		ret = -EOPNOTSUPP;
-		goto bail;
-	}
-
-	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
-		memcpy(device->node_desc, device_modify->node_desc, 64);
-
-	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
-		to_idev(device)->sys_image_guid =
-			cpu_to_be64(device_modify->sys_image_guid);
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-static int ipath_modify_port(struct ib_device *ibdev,
-			     u8 port, int port_modify_mask,
-			     struct ib_port_modify *props)
-{
-	struct ipath_ibdev *dev = to_idev(ibdev);
-
-	dev->port_cap_flags |= props->set_port_cap_mask;
-	dev->port_cap_flags &= ~props->clr_port_cap_mask;
-	if (port_modify_mask & IB_PORT_SHUTDOWN)
-		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
-	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
-		dev->qkey_violations = 0;
-	return 0;
-}
-
-static int ipath_query_gid(struct ib_device *ibdev, u8 port,
-			   int index, union ib_gid *gid)
-{
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	int ret;
-
-	if (index >= 1) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	gid->global.subnet_prefix = dev->gid_prefix;
-	gid->global.interface_id = dev->dd->ipath_guid;
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
-				    struct ib_ucontext *context,
-				    struct ib_udata *udata)
-{
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_pd *pd;
-	struct ib_pd *ret;
-
-	/*
-	 * This is actually totally arbitrary.  Some correctness tests
-	 * assume there's a maximum number of PDs that can be allocated.
-	 * We don't actually have this limit, but we fail the test if
-	 * we allow allocations of more than we report for this value.
-	 */
-
-	pd = kmalloc(sizeof *pd, GFP_KERNEL);
-	if (!pd) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	spin_lock(&dev->n_pds_lock);
-	if (dev->n_pds_allocated == ib_ipath_max_pds) {
-		spin_unlock(&dev->n_pds_lock);
-		kfree(pd);
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	dev->n_pds_allocated++;
-	spin_unlock(&dev->n_pds_lock);
-
-	/* ib_alloc_pd() will initialize pd->ibpd. */
-	pd->user = udata != NULL;
-
-	ret = &pd->ibpd;
-
-bail:
-	return ret;
-}
-
-static int ipath_dealloc_pd(struct ib_pd *ibpd)
-{
-	struct ipath_pd *pd = to_ipd(ibpd);
-	struct ipath_ibdev *dev = to_idev(ibpd->device);
-
-	spin_lock(&dev->n_pds_lock);
-	dev->n_pds_allocated--;
-	spin_unlock(&dev->n_pds_lock);
-
-	kfree(pd);
-
-	return 0;
-}
-
-/**
- * ipath_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
-				     struct ib_ah_attr *ah_attr)
-{
-	struct ipath_ah *ah;
-	struct ib_ah *ret;
-	struct ipath_ibdev *dev = to_idev(pd->device);
-	unsigned long flags;
-
-	/* A multicast address requires a GRH (see ch. 8.4.1). */
-	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
-	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
-	    !(ah_attr->ah_flags & IB_AH_GRH)) {
-		ret = ERR_PTR(-EINVAL);
-		goto bail;
-	}
-
-	if (ah_attr->dlid == 0) {
-		ret = ERR_PTR(-EINVAL);
-		goto bail;
-	}
-
-	if (ah_attr->port_num < 1 ||
-	    ah_attr->port_num > pd->device->phys_port_cnt) {
-		ret = ERR_PTR(-EINVAL);
-		goto bail;
-	}
-
-	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
-	if (!ah) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	spin_lock_irqsave(&dev->n_ahs_lock, flags);
-	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
-		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-		kfree(ah);
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	dev->n_ahs_allocated++;
-	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
-	/* ib_create_ah() will initialize ah->ibah. */
-	ah->attr = *ah_attr;
-	ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
-
-	ret = &ah->ibah;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int ipath_destroy_ah(struct ib_ah *ibah)
-{
-	struct ipath_ibdev *dev = to_idev(ibah->device);
-	struct ipath_ah *ah = to_iah(ibah);
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->n_ahs_lock, flags);
-	dev->n_ahs_allocated--;
-	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
-	kfree(ah);
-
-	return 0;
-}
-
-static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
-	struct ipath_ah *ah = to_iah(ibah);
-
-	*ah_attr = ah->attr;
-	ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
-
-	return 0;
-}
-
-/**
- * ipath_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_get_npkeys(struct ipath_devdata *dd)
-{
-	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-/**
- * ipath_get_pkey - return the indexed PKEY from the port PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
-	unsigned ret;
-
-	/* always a kernel port, no locking needed */
-	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
-		ret = 0;
-	else
-		ret = dd->ipath_pd[0]->port_pkeys[index];
-
-	return ret;
-}
-
-static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
-			    u16 *pkey)
-{
-	struct ipath_ibdev *dev = to_idev(ibdev);
-	int ret;
-
-	if (index >= ipath_get_npkeys(dev->dd)) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	*pkey = ipath_get_pkey(dev->dd, index);
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_alloc_ucontext - allocate a ucontext
- * @ibdev: the infiniband device
- * @udata: not used by the InfiniPath driver
- */
-static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
-						struct ib_udata *udata)
-{
-	struct ipath_ucontext *context;
-	struct ib_ucontext *ret;
-
-	context = kmalloc(sizeof *context, GFP_KERNEL);
-	if (!context) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	ret = &context->ibucontext;
-
-bail:
-	return ret;
-}
-
-static int ipath_dealloc_ucontext(struct ib_ucontext *context)
-{
-	kfree(to_iucontext(context));
-	return 0;
-}
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev);
-
-static void __verbs_timer(unsigned long arg)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
-	/* Handle verbs layer timeouts. */
-	ipath_ib_timer(dd->verbs_dev);
-
-	mod_timer(&dd->verbs_timer, jiffies + 1);
-}
-
-static int enable_timer(struct ipath_devdata *dd)
-{
-	/*
-	 * Early chips had a design flaw where the chip's and the kernel's
-	 * ideas of the tail register don't always agree, and therefore we won't
-	 * get an interrupt on the next packet received.
-	 * If the board supports per packet receive interrupts, use it.
-	 * Otherwise, the timer function periodically checks for packets
-	 * to cover this case.
-	 * Either way, the timer is needed for verbs layer related
-	 * processing.
-	 */
-	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
-				 0x2074076542310ULL);
-		/* Enable GPIO bit 2 interrupt */
-		dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-				 dd->ipath_gpio_mask);
-	}
-
-	setup_timer(&dd->verbs_timer, __verbs_timer, (unsigned long)dd);
-
-	dd->verbs_timer.expires = jiffies + 1;
-	add_timer(&dd->verbs_timer);
-
-	return 0;
-}
-
-static int disable_timer(struct ipath_devdata *dd)
-{
-	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		/* Disable GPIO bit 2 interrupt */
-		dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-				 dd->ipath_gpio_mask);
-		/*
-		 * We might want to undo changes to debugportselect,
-		 * but how?
-		 */
-	}
-
-	del_timer_sync(&dd->verbs_timer);
-
-	return 0;
-}
-
-static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num,
-			        struct ib_port_immutable *immutable)
-{
-	struct ib_port_attr attr;
-	int err;
-
-	err = ipath_query_port(ibdev, port_num, &attr);
-	if (err)
-		return err;
-
-	immutable->pkey_tbl_len = attr.pkey_tbl_len;
-	immutable->gid_tbl_len = attr.gid_tbl_len;
-	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
-	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-	return 0;
-}
-
-/**
- * ipath_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- *
- * Return 0 on success or a negative errno on failure.  The allocated
- * ipath_ibdev is stored in dd->verbs_dev.
- */
-int ipath_register_ib_device(struct ipath_devdata *dd)
-{
-	struct ipath_verbs_counters cntrs;
-	struct ipath_ibdev *idev;
-	struct ib_device *dev;
-	struct ipath_verbs_txreq *tx;
-	unsigned i;
-	int ret;
-
-	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
-	if (idev == NULL) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	dev = &idev->ibdev;
-
-	if (dd->ipath_sdma_descq_cnt) {
-		tx = kmalloc_array(dd->ipath_sdma_descq_cnt, sizeof *tx,
-				   GFP_KERNEL);
-		if (tx == NULL) {
-			ret = -ENOMEM;
-			goto err_tx;
-		}
-	} else
-		tx = NULL;
-	idev->txreq_bufs = tx;
-
-	/* Only need to initialize non-zero fields. */
-	spin_lock_init(&idev->n_pds_lock);
-	spin_lock_init(&idev->n_ahs_lock);
-	spin_lock_init(&idev->n_cqs_lock);
-	spin_lock_init(&idev->n_qps_lock);
-	spin_lock_init(&idev->n_srqs_lock);
-	spin_lock_init(&idev->n_mcast_grps_lock);
-
-	spin_lock_init(&idev->qp_table.lock);
-	spin_lock_init(&idev->lk_table.lock);
-	idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
-	/* Set the prefix to the default value (see ch. 4.1.1) */
-	idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
-
-	ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
-	if (ret)
-		goto err_qp;
-
-	/*
-	 * The top ib_ipath_lkey_table_size bits are used to index the
-	 * table.  The lower 8 bits can be owned by the user (copied from
-	 * the LKEY).  The remaining bits act as a generation number or tag.
-	 */
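-	/*
-	 * Illustrative split (hypothetical size): if ib_ipath_lkey_table_size
-	 * were 12, a 32-bit LKEY would break down as bits [31:20] = table
-	 * index, bits [19:8] = generation/tag, and bits [7:0] = user-owned.
-	 */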
-	idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
-	idev->lk_table.table = kcalloc(idev->lk_table.max,
-				       sizeof(*idev->lk_table.table),
-				       GFP_KERNEL);
-	if (idev->lk_table.table == NULL) {
-		ret = -ENOMEM;
-		goto err_lk;
-	}
-	INIT_LIST_HEAD(&idev->pending_mmaps);
-	spin_lock_init(&idev->pending_lock);
-	idev->mmap_offset = PAGE_SIZE;
-	spin_lock_init(&idev->mmap_offset_lock);
-	INIT_LIST_HEAD(&idev->pending[0]);
-	INIT_LIST_HEAD(&idev->pending[1]);
-	INIT_LIST_HEAD(&idev->pending[2]);
-	INIT_LIST_HEAD(&idev->piowait);
-	INIT_LIST_HEAD(&idev->rnrwait);
-	INIT_LIST_HEAD(&idev->txreq_free);
-	idev->pending_index = 0;
-	idev->port_cap_flags =
-		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
-	if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
-		idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
-	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
-	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
-	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
-	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
-	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
-	/* Snapshot current HW counters to "clear" them. */
-	ipath_get_counters(dd, &cntrs);
-	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
-	idev->z_link_error_recovery_counter =
-		cntrs.link_error_recovery_counter;
-	idev->z_link_downed_counter = cntrs.link_downed_counter;
-	idev->z_port_rcv_errors = cntrs.port_rcv_errors;
-	idev->z_port_rcv_remphys_errors =
-		cntrs.port_rcv_remphys_errors;
-	idev->z_port_xmit_discards = cntrs.port_xmit_discards;
-	idev->z_port_xmit_data = cntrs.port_xmit_data;
-	idev->z_port_rcv_data = cntrs.port_rcv_data;
-	idev->z_port_xmit_packets = cntrs.port_xmit_packets;
-	idev->z_port_rcv_packets = cntrs.port_rcv_packets;
-	idev->z_local_link_integrity_errors =
-		cntrs.local_link_integrity_errors;
-	idev->z_excessive_buffer_overrun_errors =
-		cntrs.excessive_buffer_overrun_errors;
-	idev->z_vl15_dropped = cntrs.vl15_dropped;
-
-	for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
-		list_add(&tx->txreq.list, &idev->txreq_free);
-
-	/*
-	 * The system image GUID is supposed to be the same for all
-	 * IB HCAs in a single system but since there can be other
-	 * device types in the system, we can't be sure this is unique.
-	 */
-	if (!sys_image_guid)
-		sys_image_guid = dd->ipath_guid;
-	idev->sys_image_guid = sys_image_guid;
-	idev->ib_unit = dd->ipath_unit;
-	idev->dd = dd;
-
-	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
-	dev->owner = THIS_MODULE;
-	dev->node_guid = dd->ipath_guid;
-	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
-	dev->uverbs_cmd_mask =
-		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
-		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
-		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
-		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
-		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
-		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
-		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
-		(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
-		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
-		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
-		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
-		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
-		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
-		(1ull << IB_USER_VERBS_CMD_POLL_CQ)		|
-		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)	|
-		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
-		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
-		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
-		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
-		(1ull << IB_USER_VERBS_CMD_POST_SEND)		|
-		(1ull << IB_USER_VERBS_CMD_POST_RECV)		|
-		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
-		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
-		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
-		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
-		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
-		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
-		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
-	dev->node_type = RDMA_NODE_IB_CA;
-	dev->phys_port_cnt = 1;
-	dev->num_comp_vectors = 1;
-	dev->dma_device = &dd->pcidev->dev;
-	dev->query_device = ipath_query_device;
-	dev->modify_device = ipath_modify_device;
-	dev->query_port = ipath_query_port;
-	dev->modify_port = ipath_modify_port;
-	dev->query_pkey = ipath_query_pkey;
-	dev->query_gid = ipath_query_gid;
-	dev->alloc_ucontext = ipath_alloc_ucontext;
-	dev->dealloc_ucontext = ipath_dealloc_ucontext;
-	dev->alloc_pd = ipath_alloc_pd;
-	dev->dealloc_pd = ipath_dealloc_pd;
-	dev->create_ah = ipath_create_ah;
-	dev->destroy_ah = ipath_destroy_ah;
-	dev->query_ah = ipath_query_ah;
-	dev->create_srq = ipath_create_srq;
-	dev->modify_srq = ipath_modify_srq;
-	dev->query_srq = ipath_query_srq;
-	dev->destroy_srq = ipath_destroy_srq;
-	dev->create_qp = ipath_create_qp;
-	dev->modify_qp = ipath_modify_qp;
-	dev->query_qp = ipath_query_qp;
-	dev->destroy_qp = ipath_destroy_qp;
-	dev->post_send = ipath_post_send;
-	dev->post_recv = ipath_post_receive;
-	dev->post_srq_recv = ipath_post_srq_receive;
-	dev->create_cq = ipath_create_cq;
-	dev->destroy_cq = ipath_destroy_cq;
-	dev->resize_cq = ipath_resize_cq;
-	dev->poll_cq = ipath_poll_cq;
-	dev->req_notify_cq = ipath_req_notify_cq;
-	dev->get_dma_mr = ipath_get_dma_mr;
-	dev->reg_user_mr = ipath_reg_user_mr;
-	dev->dereg_mr = ipath_dereg_mr;
-	dev->alloc_fmr = ipath_alloc_fmr;
-	dev->map_phys_fmr = ipath_map_phys_fmr;
-	dev->unmap_fmr = ipath_unmap_fmr;
-	dev->dealloc_fmr = ipath_dealloc_fmr;
-	dev->attach_mcast = ipath_multicast_attach;
-	dev->detach_mcast = ipath_multicast_detach;
-	dev->process_mad = ipath_process_mad;
-	dev->mmap = ipath_mmap;
-	dev->dma_ops = &ipath_dma_mapping_ops;
-	dev->get_port_immutable = ipath_port_immutable;
-
-	snprintf(dev->node_desc, sizeof(dev->node_desc),
-		 IPATH_IDSTR " %s", init_utsname()->nodename);
-
-	ret = ib_register_device(dev, NULL);
-	if (ret)
-		goto err_reg;
-
-	ret = ipath_verbs_register_sysfs(dev);
-	if (ret)
-		goto err_class;
-
-	enable_timer(dd);
-
-	goto bail;
-
-err_class:
-	ib_unregister_device(dev);
-err_reg:
-	kfree(idev->lk_table.table);
-err_lk:
-	kfree(idev->qp_table.table);
-err_qp:
-	kfree(idev->txreq_bufs);
-err_tx:
-	ib_dealloc_device(dev);
-	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
-	idev = NULL;
-
-bail:
-	dd->verbs_dev = idev;
-	return ret;
-}
-
-void ipath_unregister_ib_device(struct ipath_ibdev *dev)
-{
-	struct ib_device *ibdev = &dev->ibdev;
-	u32 qps_inuse;
-
-	ib_unregister_device(ibdev);
-
-	disable_timer(dev->dd);
-
-	if (!list_empty(&dev->pending[0]) ||
-	    !list_empty(&dev->pending[1]) ||
-	    !list_empty(&dev->pending[2]))
-		ipath_dev_err(dev->dd, "pending list not empty!\n");
-	if (!list_empty(&dev->piowait))
-		ipath_dev_err(dev->dd, "piowait list not empty!\n");
-	if (!list_empty(&dev->rnrwait))
-		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
-	if (!ipath_mcast_tree_empty())
-		ipath_dev_err(dev->dd, "multicast table memory leak!\n");
-	/*
-	 * Note that ipath_unregister_ib_device() can be called before all
-	 * the QPs are destroyed!
-	 */
-	qps_inuse = ipath_free_all_qps(&dev->qp_table);
-	if (qps_inuse)
-		ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
-			qps_inuse);
-	kfree(dev->qp_table.table);
-	kfree(dev->lk_table.table);
-	kfree(dev->txreq_bufs);
-	ib_dealloc_device(ibdev);
-}
-
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
-			char *buf)
-{
-	struct ipath_ibdev *dev =
-		container_of(device, struct ipath_ibdev, ibdev.dev);
-
-	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
-			char *buf)
-{
-	struct ipath_ibdev *dev =
-		container_of(device, struct ipath_ibdev, ibdev.dev);
-	int ret;
-
-	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
-	if (ret < 0)
-		goto bail;
-	strcat(buf, "\n");
-	ret = strlen(buf);
-
-bail:
-	return ret;
-}
-
-static ssize_t show_stats(struct device *device, struct device_attribute *attr,
-			  char *buf)
-{
-	struct ipath_ibdev *dev =
-		container_of(device, struct ipath_ibdev, ibdev.dev);
-	int i;
-	int len;
-
-	len = sprintf(buf,
-		      "RC resends  %d\n"
-		      "RC no QACK  %d\n"
-		      "RC ACKs     %d\n"
-		      "RC SEQ NAKs %d\n"
-		      "RC RDMA seq %d\n"
-		      "RC RNR NAKs %d\n"
-		      "RC OTH NAKs %d\n"
-		      "RC timeouts %d\n"
-		      "RC RDMA dup %d\n"
-		      "piobuf wait %d\n"
-		      "unaligned   %d\n"
-		      "PKT drops   %d\n"
-		      "WQE errs    %d\n",
-		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
-		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
-		      dev->n_other_naks, dev->n_timeouts,
-		      dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
-		      dev->n_pkt_drops, dev->n_wqe_errs);
-	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
-		const struct ipath_opcode_stats *si = &dev->opstats[i];
-
-		if (!si->n_packets && !si->n_bytes)
-			continue;
-		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
-			       (unsigned long long) si->n_packets,
-			       (unsigned long long) si->n_bytes);
-	}
-	return len;
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
-
-static struct device_attribute *ipath_class_attributes[] = {
-	&dev_attr_hw_rev,
-	&dev_attr_hca_type,
-	&dev_attr_board_id,
-	&dev_attr_stats
-};
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev)
-{
-	int i;
-	int ret;
-
-	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
-		ret = device_create_file(&dev->dev,
-				       ipath_class_attributes[i]);
-		if (ret)
-			goto bail;
-	}
-	return 0;
-bail:
-	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-		device_remove_file(&dev->dev, ipath_class_attributes[i]);
-	return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
deleted file mode 100644
index 6c70a89..0000000
--- a/drivers/staging/rdma/ipath/ipath_verbs.h
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef IPATH_VERBS_H
-#define IPATH_VERBS_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-
-#include "ipath_kernel.h"
-
-#define IPATH_MAX_RDMA_ATOMIC	4
-
-#define QPN_MAX                 (1 << 24)
-#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define IPATH_UVERBS_ABI_VERSION       2
-
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK			0x20
-#define IB_NAK_PSN_ERROR		0x60
-#define IB_NAK_INVALID_REQUEST		0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST	0x64
-
-/* Flags for checking QP state (see ib_ipath_state_ops[]) */
-#define IPATH_POST_SEND_OK		0x01
-#define IPATH_POST_RECV_OK		0x02
-#define IPATH_PROCESS_RECV_OK		0x04
-#define IPATH_PROCESS_SEND_OK		0x08
-#define IPATH_PROCESS_NEXT_SEND_OK	0x10
-#define IPATH_FLUSH_SEND		0x20
-#define IPATH_FLUSH_RECV		0x40
-#define IPATH_PROCESS_OR_FLUSH_SEND \
-	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE	0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)
-
-struct ib_reth {
-	__be64 vaddr;
-	__be32 rkey;
-	__be32 length;
-} __attribute__ ((packed));
-
-struct ib_atomic_eth {
-	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
-	__be32 rkey;
-	__be64 swap_data;
-	__be64 compare_data;
-} __attribute__ ((packed));
-
-struct ipath_other_headers {
-	__be32 bth[3];
-	union {
-		struct {
-			__be32 deth[2];
-			__be32 imm_data;
-		} ud;
-		struct {
-			struct ib_reth reth;
-			__be32 imm_data;
-		} rc;
-		struct {
-			__be32 aeth;
-			__be32 atomic_ack_eth[2];
-		} at;
-		__be32 imm_data;
-		__be32 aeth;
-		struct ib_atomic_eth atomic_eth;
-	} u;
-} __attribute__ ((packed));
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
- * will be in the eager header buffer.  The remaining 12 or 16 bytes
- * are in the data buffer.
- */
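-/*
- * For reference, the 68-byte figure is LRH (8) + GRH (40) + BTH (12) +
- * DETH (8); the immediate-data variant adds 4 more bytes.
- */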
-struct ipath_ib_header {
-	__be16 lrh[4];
-	union {
-		struct {
-			struct ib_grh grh;
-			struct ipath_other_headers oth;
-		} l;
-		struct ipath_other_headers oth;
-	} u;
-} __attribute__ ((packed));
-
-struct ipath_pio_header {
-	__le32 pbc[2];
-	struct ipath_ib_header hdr;
-} __attribute__ ((packed));
-
-/*
- * There is one struct ipath_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct ipath_mcast_qp.
- */
-struct ipath_mcast_qp {
-	struct list_head list;
-	struct ipath_qp *qp;
-};
-
-struct ipath_mcast {
-	struct rb_node rb_node;
-	union ib_gid mgid;
-	struct list_head qp_list;
-	wait_queue_head_t wait;
-	atomic_t refcount;
-	int n_attached;
-};
-
-/* Protection domain */
-struct ipath_pd {
-	struct ib_pd ibpd;
-	int user;		/* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct ipath_ah {
-	struct ib_ah ibah;
-	struct ib_ah_attr attr;
-};
-
-/*
- * This structure is used by ipath_mmap() to validate an offset
- * when an mmap() request is made.  The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct ipath_mmap_info {
-	struct list_head pending_mmaps;
-	struct ib_ucontext *context;
-	void *obj;
-	__u64 offset;
-	struct kref ref;
-	unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct ipath_cq_wc {
-	u32 head;		/* index of next entry to fill */
-	u32 tail;		/* index of next ib_poll_cq() entry */
-	union {
-		/* these are actually size ibcq.cqe + 1 */
-		struct ib_uverbs_wc uqueue[0];
-		struct ib_wc kqueue[0];
-	};
-};
-
-/*
- * The completion queue structure.
- */
-struct ipath_cq {
-	struct ib_cq ibcq;
-	struct tasklet_struct comptask;
-	spinlock_t lock;
-	u8 notify;
-	u8 triggered;
-	struct ipath_cq_wc *queue;
-	struct ipath_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct ipath_seg {
-	void *vaddr;
-	size_t length;
-};
-
-/* The number of ipath_segs that fit in a page. */
-#define IPATH_SEGSZ     (PAGE_SIZE / sizeof (struct ipath_seg))
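-/* e.g. 4 KiB pages and a 16-byte ipath_seg (64-bit build) give 256 segs */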
-
-struct ipath_segarray {
-	struct ipath_seg segs[IPATH_SEGSZ];
-};
-
-struct ipath_mregion {
-	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
-	u64 user_base;		/* User's address for this region */
-	u64 iova;		/* IB start address of this region */
-	size_t length;
-	u32 lkey;
-	u32 offset;		/* offset (bytes) to start of region */
-	int access_flags;
-	u32 max_segs;		/* number of ipath_segs in all the arrays */
-	u32 mapsz;		/* size of the map array */
-	struct ipath_segarray *map[0];	/* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct ipath_sge {
-	struct ipath_mregion *mr;
-	void *vaddr;		/* kernel virtual address of segment */
-	u32 sge_length;		/* length of the SGE */
-	u32 length;		/* remaining length of the segment */
-	u16 m;			/* current index: mr->map[m] */
-	u16 n;			/* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct ipath_mr {
-	struct ib_mr ibmr;
-	struct ib_umem *umem;
-	struct ipath_mregion mr;	/* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct ipath_swqe {
-	union {
-		struct ib_send_wr wr;   /* don't use wr.sg_list */
-		struct ib_ud_wr ud_wr;
-		struct ib_rdma_wr rdma_wr;
-		struct ib_atomic_wr atomic_wr;
-	};
-
-	u32 psn;		/* first packet sequence number */
-	u32 lpsn;		/* last packet sequence number */
-	u32 ssn;		/* send sequence number */
-	u32 length;		/* total length of data in sg_list */
-	struct ipath_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct ipath_rwqe {
-	u64 wr_id;
-	u8 num_sge;
-	struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct ipath_rwq {
-	u32 head;		/* new work requests posted to the head */
-	u32 tail;		/* receive processing pulls requests from here. */
-	struct ipath_rwqe wq[0];
-};
-
-struct ipath_rq {
-	struct ipath_rwq *wq;
-	spinlock_t lock;
-	u32 size;		/* size of RWQE array */
-	u8 max_sge;
-};
-
-struct ipath_srq {
-	struct ib_srq ibsrq;
-	struct ipath_rq rq;
-	struct ipath_mmap_info *ip;
-	/* send signal when number of RWQEs < limit */
-	u32 limit;
-};
-
-struct ipath_sge_state {
-	struct ipath_sge *sg_list;      /* next SGE to be used if any */
-	struct ipath_sge sge;   /* progress state for the current SGE */
-	u8 num_sge;
-	u8 static_rate;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send a RDMA read response or atomic operation.
- */
-struct ipath_ack_entry {
-	u8 opcode;
-	u8 sent;
-	u32 psn;
-	union {
-		struct ipath_sge_state rdma_sge;
-		u64 atomic_data;
-	};
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or when changing the QP 'state'.
- */
-struct ipath_qp {
-	struct ib_qp ibqp;
-	struct ipath_qp *next;		/* linked list for QPN hash table */
-	struct ipath_qp *timer_next;	/* linked list for ipath_ib_timer() */
-	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
-	struct list_head piowait;	/* link for wait PIO buf */
-	struct list_head timerwait;	/* link for waiting for timeouts */
-	struct ib_ah_attr remote_ah_attr;
-	struct ipath_ib_header s_hdr;	/* next packet header to send */
-	atomic_t refcount;
-	wait_queue_head_t wait;
-	wait_queue_head_t wait_dma;
-	struct tasklet_struct s_task;
-	struct ipath_mmap_info *ip;
-	struct ipath_sge_state *s_cur_sge;
-	struct ipath_verbs_txreq *s_tx;
-	struct ipath_sge_state s_sge;	/* current send request data */
-	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
-	struct ipath_sge_state s_ack_rdma_sge;
-	struct ipath_sge_state s_rdma_read_sge;
-	struct ipath_sge_state r_sge;	/* current receive data */
-	spinlock_t s_lock;
-	atomic_t s_dma_busy;
-	u16 s_pkt_delay;
-	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
-	u32 s_cur_size;		/* size of send packet in bytes */
-	u32 s_len;		/* total length of s_sge */
-	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
-	u32 s_next_psn;		/* PSN for next request */
-	u32 s_last_psn;		/* last response PSN processed */
-	u32 s_psn;		/* current packet sequence number */
-	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
-	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
-	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
-	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
-	u64 r_wr_id;		/* ID for current receive WQE */
-	unsigned long r_aflags;
-	u32 r_len;		/* total length of r_sge */
-	u32 r_rcv_len;		/* receive data len processed */
-	u32 r_psn;		/* expected rcv packet sequence number */
-	u32 r_msn;		/* message sequence number */
-	u8 state;		/* QP state */
-	u8 s_state;		/* opcode of last packet sent */
-	u8 s_ack_state;		/* opcode of packet to ACK */
-	u8 s_nak_state;		/* non-zero if NAK is pending */
-	u8 r_state;		/* opcode of last packet received */
-	u8 r_nak_state;		/* non-zero if NAK is pending */
-	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
-	u8 r_flags;
-	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
-	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
-	u8 qp_access_flags;
-	u8 s_max_sge;		/* size of s_wq->sg_list */
-	u8 s_retry_cnt;		/* number of times to retry */
-	u8 s_rnr_retry_cnt;
-	u8 s_retry;		/* requester retry counter */
-	u8 s_rnr_retry;		/* requester RNR retry counter */
-	u8 s_pkey_index;	/* PKEY index to use */
-	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
-	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
-	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
-	u8 s_flags;
-	u8 s_dmult;
-	u8 s_draining;
-	u8 timeout;		/* Timeout for this QP */
-	enum ib_mtu path_mtu;
-	u32 remote_qpn;
-	u32 qkey;		/* QKEY for this QP (for UD or RD) */
-	u32 s_size;		/* send work queue size */
-	u32 s_head;		/* new entries added here */
-	u32 s_tail;		/* next entry to process */
-	u32 s_cur;		/* current work queue entry */
-	u32 s_last;		/* last un-ACK'ed entry */
-	u32 s_ssn;		/* SSN of tail entry */
-	u32 s_lsn;		/* limit sequence number (credit) */
-	struct ipath_swqe *s_wq;	/* send work queue */
-	struct ipath_swqe *s_wqe;
-	struct ipath_sge *r_ud_sg_list;
-	struct ipath_rq r_rq;		/* receive work queue */
-	struct ipath_sge r_sg_list[0];	/* verified SGEs */
-};
-
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define IPATH_R_WRID_VALID	0
-
-/*
- * Bit definitions for r_flags.
- */
-#define IPATH_R_REUSE_SGE	0x01
-#define IPATH_R_RDMAR_SEQ	0x02
-
-/*
- * Bit definitions for s_flags.
- *
- * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
- *			   before processing the next SWQE
- * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
- *			   before processing the next SWQE
- * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
- * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- *		      next send completion entry not via send DMA.
- */
-#define IPATH_S_SIGNAL_REQ_WR	0x01
-#define IPATH_S_FENCE_PENDING	0x02
-#define IPATH_S_RDMAR_PENDING	0x04
-#define IPATH_S_ACK_PENDING	0x08
-#define IPATH_S_BUSY		0x10
-#define IPATH_S_WAITING		0x20
-#define IPATH_S_WAIT_SSN_CREDIT	0x40
-#define IPATH_S_WAIT_DMA	0x80
-
-#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
-	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
-
-#define IPATH_PSN_CREDIT	512
-
-/*
- * Since struct ipath_swqe is not a fixed size, we can't simply index into
- * struct ipath_qp.s_wq.  This function does the array index computation.
- */
-static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
-					      unsigned n)
-{
-	return (struct ipath_swqe *)((char *)qp->s_wq +
-				     (sizeof(struct ipath_swqe) +
-				      qp->s_max_sge *
-				      sizeof(struct ipath_sge)) * n);
-}
-
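-/*
- * Worked example (hypothetical value): with qp->s_max_sge == 3, each entry
- * occupies sizeof(struct ipath_swqe) + 3 * sizeof(struct ipath_sge) bytes,
- * so get_swqe_ptr(qp, n) is simply (char *)qp->s_wq plus n such strides.
- */
-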
-/*
- * Since struct ipath_rwqe is not a fixed size, we can't simply index into
- * struct ipath_rwq.wq.  This function does the array index computation.
- */
-static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
-					      unsigned n)
-{
-	return (struct ipath_rwqe *)
-		((char *) rq->wq->wq +
-		 (sizeof(struct ipath_rwqe) +
-		  rq->max_sge * sizeof(struct ib_sge)) * n);
-}
-
-/*
- * QPN-map pages start out as NULL; they are allocated upon
- * first use and never deallocated.  This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
-	atomic_t n_free;
-	void *page;
-};
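-
-/*
- * Sizing sketch (assuming 4 KiB pages): each map page holds
- * PAGE_SIZE * BITS_PER_BYTE = 32768 bits, i.e. tracks 32768 QPNs, so
- * QPNMAP_ENTRIES = (1 << 24) / 4096 / 8 = 512 slots cover the QPN space.
- */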
-
-struct ipath_qp_table {
-	spinlock_t lock;
-	u32 last;		/* last QP number allocated */
-	u32 max;		/* size of the hash table */
-	u32 nmaps;		/* size of the map table */
-	struct ipath_qp **table;
-	/* bit map of free numbers */
-	struct qpn_map map[QPNMAP_ENTRIES];
-};
-
-struct ipath_lkey_table {
-	spinlock_t lock;
-	u32 next;		/* next unused index (speeds search) */
-	u32 gen;		/* generation count */
-	u32 max;		/* size of the table */
-	struct ipath_mregion **table;
-};
-
-struct ipath_opcode_stats {
-	u64 n_packets;		/* number of packets */
-	u64 n_bytes;		/* total number of bytes */
-};
-
-struct ipath_ibdev {
-	struct ib_device ibdev;
-	struct ipath_devdata *dd;
-	struct list_head pending_mmaps;
-	spinlock_t mmap_offset_lock;
-	u32 mmap_offset;
-	int ib_unit;		/* This is the device number */
-	u16 sm_lid;		/* in host order */
-	u8 sm_sl;
-	u8 mkeyprot;
-	/* non-zero when timer is set */
-	unsigned long mkey_lease_timeout;
-
-	/* The following fields are really per port. */
-	struct ipath_qp_table qp_table;
-	struct ipath_lkey_table lk_table;
-	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
-	struct list_head piowait;	/* list for wait PIO buf */
-	struct list_head txreq_free;
-	void *txreq_bufs;
-	/* list of QPs waiting for RNR timer */
-	struct list_head rnrwait;
-	spinlock_t pending_lock;
-	__be64 sys_image_guid;	/* in network order */
-	__be64 gid_prefix;	/* in network order */
-	__be64 mkey;
-
-	u32 n_pds_allocated;	/* number of PDs allocated for device */
-	spinlock_t n_pds_lock;
-	u32 n_ahs_allocated;	/* number of AHs allocated for device */
-	spinlock_t n_ahs_lock;
-	u32 n_cqs_allocated;	/* number of CQs allocated for device */
-	spinlock_t n_cqs_lock;
-	u32 n_qps_allocated;	/* number of QPs allocated for device */
-	spinlock_t n_qps_lock;
-	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
-	spinlock_t n_srqs_lock;
-	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
-	spinlock_t n_mcast_grps_lock;
-
-	u64 ipath_sword;	/* total dwords sent (sample result) */
-	u64 ipath_rword;	/* total dwords received (sample result) */
-	u64 ipath_spkts;	/* total packets sent (sample result) */
-	u64 ipath_rpkts;	/* total packets received (sample result) */
-	/* # of ticks no data sent (sample result) */
-	u64 ipath_xmit_wait;
-	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
-	u64 n_unicast_xmit;	/* total unicast packets sent */
-	u64 n_unicast_rcv;	/* total unicast packets received */
-	u64 n_multicast_xmit;	/* total multicast packets sent */
-	u64 n_multicast_rcv;	/* total multicast packets received */
-	u64 z_symbol_error_counter;		/* starting count for PMA */
-	u64 z_link_error_recovery_counter;	/* starting count for PMA */
-	u64 z_link_downed_counter;		/* starting count for PMA */
-	u64 z_port_rcv_errors;			/* starting count for PMA */
-	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
-	u64 z_port_xmit_discards;		/* starting count for PMA */
-	u64 z_port_xmit_data;			/* starting count for PMA */
-	u64 z_port_rcv_data;			/* starting count for PMA */
-	u64 z_port_xmit_packets;		/* starting count for PMA */
-	u64 z_port_rcv_packets;			/* starting count for PMA */
-	u32 z_pkey_violations;			/* starting count for PMA */
-	u32 z_local_link_integrity_errors;	/* starting count for PMA */
-	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
-	u32 z_vl15_dropped;			/* starting count for PMA */
-	u32 n_rc_resends;
-	u32 n_rc_acks;
-	u32 n_rc_qacks;
-	u32 n_seq_naks;
-	u32 n_rdma_seq;
-	u32 n_rnr_naks;
-	u32 n_other_naks;
-	u32 n_timeouts;
-	u32 n_pkt_drops;
-	u32 n_vl15_dropped;
-	u32 n_wqe_errs;
-	u32 n_rdma_dup_busy;
-	u32 n_piowait;
-	u32 n_unaligned;
-	u32 port_cap_flags;
-	u32 pma_sample_start;
-	u32 pma_sample_interval;
-	__be16 pma_counter_select[5];
-	u16 pma_tag;
-	u16 qkey_violations;
-	u16 mkey_violations;
-	u16 mkey_lease_period;
-	u16 pending_index;	/* which pending queue is active */
-	u8 pma_sample_status;
-	u8 subnet_timeout;
-	u8 vl_high_limit;
-	struct ipath_opcode_stats opstats[128];
-};
-
-struct ipath_verbs_counters {
-	u64 symbol_error_counter;
-	u64 link_error_recovery_counter;
-	u64 link_downed_counter;
-	u64 port_rcv_errors;
-	u64 port_rcv_remphys_errors;
-	u64 port_xmit_discards;
-	u64 port_xmit_data;
-	u64 port_rcv_data;
-	u64 port_xmit_packets;
-	u64 port_rcv_packets;
-	u32 local_link_integrity_errors;
-	u32 excessive_buffer_overrun_errors;
-	u32 vl15_dropped;
-};
-
-struct ipath_verbs_txreq {
-	struct ipath_qp         *qp;
-	struct ipath_swqe       *wqe;
-	u32                      map_len;
-	u32                      len;
-	struct ipath_sge_state  *ss;
-	struct ipath_pio_header  hdr;
-	struct ipath_sdma_txreq  txreq;
-};
-
-static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
-{
-	return container_of(ibmr, struct ipath_mr, ibmr);
-}
-
-static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
-{
-	return container_of(ibpd, struct ipath_pd, ibpd);
-}
-
-static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
-{
-	return container_of(ibah, struct ipath_ah, ibah);
-}
-
-static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
-{
-	return container_of(ibcq, struct ipath_cq, ibcq);
-}
-
-static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
-{
-	return container_of(ibsrq, struct ipath_srq, ibsrq);
-}
-
-static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
-{
-	return container_of(ibqp, struct ipath_qp, ibqp);
-}
-
-static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
-{
-	return container_of(ibdev, struct ipath_ibdev, ibdev);
-}
-
-/*
- * This must be called with s_lock held.
- */
-static inline void ipath_schedule_send(struct ipath_qp *qp)
-{
-	if (qp->s_flags & IPATH_S_ANY_WAIT)
-		qp->s_flags &= ~IPATH_S_ANY_WAIT;
-	if (!(qp->s_flags & IPATH_S_BUSY))
-		tasklet_hi_schedule(&qp->s_task);
-}
-
-int ipath_process_mad(struct ib_device *ibdev,
-		      int mad_flags,
-		      u8 port_num,
-		      const struct ib_wc *in_wc,
-		      const struct ib_grh *in_grh,
-		      const struct ib_mad_hdr *in, size_t in_mad_size,
-		      struct ib_mad_hdr *out, size_t *out_mad_size,
-		      u16 *out_mad_pkey_index);
-
-/*
- * Compare the lower 24 bits of the two values.
- * Returns an integer less than, equal to, or greater than zero.
- */
-static inline int ipath_cmp24(u32 a, u32 b)
-{
-	return (((int) a) - ((int) b)) << 8;
-}
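-
-/*
- * Why the shift works: shifting the signed difference left by 8 discards
- * the top 8 bits, so the sign of the result depends only on the (wrapping)
- * distance between the low 24 bits of a and b.
- */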
-
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
-
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-			    u64 *rwords, u64 *spkts, u64 *rpkts,
-			    u64 *xmit_wait);
-
-int ipath_get_counters(struct ipath_devdata *dd,
-		       struct ipath_verbs_counters *cntrs);
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_mcast_tree_empty(void);
-
-__be32 ipath_compute_aeth(struct ipath_qp *qp);
-
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
-
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
-			      struct ib_qp_init_attr *init_attr,
-			      struct ib_udata *udata);
-
-int ipath_destroy_qp(struct ib_qp *ibqp);
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		    int attr_mask, struct ib_udata *udata);
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		   int attr_mask, struct ib_qp_init_attr *init_attr);
-
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
-
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
-
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
-
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
-
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
-		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);
-
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
-
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
-
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
-
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
-
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
-		     struct ipath_mregion *mr);
-
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
-
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
-		  struct ib_sge *sge, int acc);
-
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
-		  u32 len, u64 vaddr, u32 rkey, int acc);
-
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-			   struct ib_recv_wr **bad_wr);
-
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
-				struct ib_srq_init_attr *srq_init_attr,
-				struct ib_udata *udata);
-
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-		     enum ib_srq_attr_mask attr_mask,
-		     struct ib_udata *udata);
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int ipath_destroy_srq(struct ib_srq *ibsrq);
-
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
-
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
-			      const struct ib_cq_init_attr *attr,
-			      struct ib_ucontext *context,
-			      struct ib_udata *udata);
-
-int ipath_destroy_cq(struct ib_cq *ibcq);
-
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-				u64 virt_addr, int mr_access_flags,
-				struct ib_udata *udata);
-
-int ipath_dereg_mr(struct ib_mr *ibmr);
-
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-			       struct ib_fmr_attr *fmr_attr);
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
-		       int list_len, u64 iova);
-
-int ipath_unmap_fmr(struct list_head *fmr_list);
-
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
-
-void ipath_release_mmap_info(struct kref *ref);
-
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
-					       u32 size,
-					       struct ib_ucontext *context,
-					       void *obj);
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
-			    struct ipath_mmap_info *ip,
-			    u32 size, void *obj);
-
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-void ipath_insert_rnr_queue(struct ipath_qp *qp);
-
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
-		   u32 *lengthp, struct ipath_sge_state *ss);
-
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
-
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
-		   struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
-			   struct ipath_other_headers *ohdr,
-			   u32 bth0, u32 bth2);
-
-void ipath_do_send(unsigned long data);
-
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
-			 enum ib_wc_status status);
-
-int ipath_make_rc_req(struct ipath_qp *qp);
-
-int ipath_make_uc_req(struct ipath_qp *qp);
-
-int ipath_make_ud_req(struct ipath_qp *qp);
-
-int ipath_register_ib_device(struct ipath_devdata *);
-
-void ipath_unregister_ib_device(struct ipath_ibdev *);
-
-void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
-
-int ipath_ib_piobufavail(struct ipath_ibdev *);
-
-unsigned ipath_get_npkeys(struct ipath_devdata *);
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *);
-
-unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
-
-extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
-
-/*
- * The array declared below converts HCA-specific LinkTrainingState values
- * to IB PhysPortState values.
- */
-extern const u8 ipath_cvt_physportstate[];
-#define IB_PHYSPORTSTATE_SLEEP 1
-#define IB_PHYSPORTSTATE_POLL 2
-#define IB_PHYSPORTSTATE_DISABLED 3
-#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-#define IB_PHYSPORTSTATE_LINKUP 5
-#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
-
-extern const int ib_ipath_state_ops[];
-
-extern unsigned int ib_ipath_lkey_table_size;
-
-extern unsigned int ib_ipath_max_cqes;
-
-extern unsigned int ib_ipath_max_cqs;
-
-extern unsigned int ib_ipath_max_qp_wrs;
-
-extern unsigned int ib_ipath_max_qps;
-
-extern unsigned int ib_ipath_max_sges;
-
-extern unsigned int ib_ipath_max_mcast_grps;
-
-extern unsigned int ib_ipath_max_mcast_qp_attached;
-
-extern unsigned int ib_ipath_max_srqs;
-
-extern unsigned int ib_ipath_max_srq_sges;
-
-extern unsigned int ib_ipath_max_srq_wrs;
-
-extern const u32 ib_ipath_rnr_table[];
-
-extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
-
-#endif				/* IPATH_VERBS_H */
diff --git a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c b/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
deleted file mode 100644
index 72d476f..0000000
--- a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/rculist.h>
-#include <linux/slab.h>
-
-#include "ipath_verbs.h"
-
-/*
- * Global table of GID to attached QPs.
- * The table is global to all ipath devices since a send from one QP/device
- * needs to be locally routed to any locally attached QPs on the same
- * or different device.
- */
-static struct rb_root mcast_tree;
-static DEFINE_SPINLOCK(mcast_lock);
-
-/**
- * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
- * @qp: the QP to link
- */
-static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
-{
-	struct ipath_mcast_qp *mqp;
-
-	mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
-	if (!mqp)
-		goto bail;
-
-	mqp->qp = qp;
-	atomic_inc(&qp->refcount);
-
-bail:
-	return mqp;
-}
-
-static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
-{
-	struct ipath_qp *qp = mqp->qp;
-
-	/* Notify ipath_destroy_qp() if it is waiting. */
-	if (atomic_dec_and_test(&qp->refcount))
-		wake_up(&qp->wait);
-
-	kfree(mqp);
-}
-
-/**
- * ipath_mcast_alloc - allocate the multicast GID structure
- * @mgid: the multicast GID
- *
- * A list of QPs will be attached to this structure.
- */
-static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
-{
-	struct ipath_mcast *mcast;
-
-	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
-	if (!mcast)
-		goto bail;
-
-	mcast->mgid = *mgid;
-	INIT_LIST_HEAD(&mcast->qp_list);
-	init_waitqueue_head(&mcast->wait);
-	atomic_set(&mcast->refcount, 0);
-	mcast->n_attached = 0;
-
-bail:
-	return mcast;
-}
-
-static void ipath_mcast_free(struct ipath_mcast *mcast)
-{
-	struct ipath_mcast_qp *p, *tmp;
-
-	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
-		ipath_mcast_qp_free(p);
-
-	kfree(mcast);
-}
-
-/**
- * ipath_mcast_find - search the global table for the given multicast GID
- * @mgid: the multicast GID to search for
- *
- * Returns NULL if not found.
- *
- * The caller is responsible for decrementing the reference count if found.
- */
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
-{
-	struct rb_node *n;
-	unsigned long flags;
-	struct ipath_mcast *mcast;
-
-	spin_lock_irqsave(&mcast_lock, flags);
-	n = mcast_tree.rb_node;
-	while (n) {
-		int ret;
-
-		mcast = rb_entry(n, struct ipath_mcast, rb_node);
-
-		ret = memcmp(mgid->raw, mcast->mgid.raw,
-			     sizeof(union ib_gid));
-		if (ret < 0)
-			n = n->rb_left;
-		else if (ret > 0)
-			n = n->rb_right;
-		else {
-			atomic_inc(&mcast->refcount);
-			spin_unlock_irqrestore(&mcast_lock, flags);
-			goto bail;
-		}
-	}
-	spin_unlock_irqrestore(&mcast_lock, flags);
-
-	mcast = NULL;
-
-bail:
-	return mcast;
-}
-
-/**
- * ipath_mcast_add - insert mcast GID into table and attach QP struct
- * @mcast: the mcast GID table
- * @mqp: the QP to attach
- *
- * Return zero if both were added.  Return EEXIST if the GID was already in
- * the table but the QP was added.  Return ESRCH if the QP was already
- * attached and neither structure was added.
- */
-static int ipath_mcast_add(struct ipath_ibdev *dev,
-			   struct ipath_mcast *mcast,
-			   struct ipath_mcast_qp *mqp)
-{
-	struct rb_node **n = &mcast_tree.rb_node;
-	struct rb_node *pn = NULL;
-	int ret;
-
-	spin_lock_irq(&mcast_lock);
-
-	while (*n) {
-		struct ipath_mcast *tmcast;
-		struct ipath_mcast_qp *p;
-
-		pn = *n;
-		tmcast = rb_entry(pn, struct ipath_mcast, rb_node);
-
-		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
-			     sizeof(union ib_gid));
-		if (ret < 0) {
-			n = &pn->rb_left;
-			continue;
-		}
-		if (ret > 0) {
-			n = &pn->rb_right;
-			continue;
-		}
-
-		/* Search the QP list to see if this is already there. */
-		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
-			if (p->qp == mqp->qp) {
-				ret = ESRCH;
-				goto bail;
-			}
-		}
-		if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
-			ret = ENOMEM;
-			goto bail;
-		}
-
-		tmcast->n_attached++;
-
-		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
-		ret = EEXIST;
-		goto bail;
-	}
-
-	spin_lock(&dev->n_mcast_grps_lock);
-	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
-		spin_unlock(&dev->n_mcast_grps_lock);
-		ret = ENOMEM;
-		goto bail;
-	}
-
-	dev->n_mcast_grps_allocated++;
-	spin_unlock(&dev->n_mcast_grps_lock);
-
-	mcast->n_attached++;
-
-	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
-
-	atomic_inc(&mcast->refcount);
-	rb_link_node(&mcast->rb_node, pn, n);
-	rb_insert_color(&mcast->rb_node, &mcast_tree);
-
-	ret = 0;
-
-bail:
-	spin_unlock_irq(&mcast_lock);
-
-	return ret;
-}
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	struct ipath_qp *qp = to_iqp(ibqp);
-	struct ipath_ibdev *dev = to_idev(ibqp->device);
-	struct ipath_mcast *mcast;
-	struct ipath_mcast_qp *mqp;
-	int ret;
-
-	/*
-	 * Allocate data structures since its better to do this outside of
-	 * spin locks and it will most likely be needed.
-	 */
-	mcast = ipath_mcast_alloc(gid);
-	if (mcast == NULL) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-	mqp = ipath_mcast_qp_alloc(qp);
-	if (mqp == NULL) {
-		ipath_mcast_free(mcast);
-		ret = -ENOMEM;
-		goto bail;
-	}
-	switch (ipath_mcast_add(dev, mcast, mqp)) {
-	case ESRCH:
-		/* Neither was used: can't attach the same QP twice. */
-		ipath_mcast_qp_free(mqp);
-		ipath_mcast_free(mcast);
-		ret = -EINVAL;
-		goto bail;
-	case EEXIST:		/* The mcast wasn't used */
-		ipath_mcast_free(mcast);
-		break;
-	case ENOMEM:
-		/* Exceeded the maximum number of mcast groups. */
-		ipath_mcast_qp_free(mqp);
-		ipath_mcast_free(mcast);
-		ret = -ENOMEM;
-		goto bail;
-	default:
-		break;
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	struct ipath_qp *qp = to_iqp(ibqp);
-	struct ipath_ibdev *dev = to_idev(ibqp->device);
-	struct ipath_mcast *mcast = NULL;
-	struct ipath_mcast_qp *p, *tmp;
-	struct rb_node *n;
-	int last = 0;
-	int ret;
-
-	spin_lock_irq(&mcast_lock);
-
-	/* Find the GID in the mcast table. */
-	n = mcast_tree.rb_node;
-	while (1) {
-		if (n == NULL) {
-			spin_unlock_irq(&mcast_lock);
-			ret = -EINVAL;
-			goto bail;
-		}
-
-		mcast = rb_entry(n, struct ipath_mcast, rb_node);
-		ret = memcmp(gid->raw, mcast->mgid.raw,
-			     sizeof(union ib_gid));
-		if (ret < 0)
-			n = n->rb_left;
-		else if (ret > 0)
-			n = n->rb_right;
-		else
-			break;
-	}
-
-	/* Search the QP list. */
-	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
-		if (p->qp != qp)
-			continue;
-		/*
-		 * We found it, so remove it, but don't poison the forward
-		 * link until we are sure there are no list walkers.
-		 */
-		list_del_rcu(&p->list);
-		mcast->n_attached--;
-
-		/* If this was the last attached QP, remove the GID too. */
-		if (list_empty(&mcast->qp_list)) {
-			rb_erase(&mcast->rb_node, &mcast_tree);
-			last = 1;
-		}
-		break;
-	}
-
-	spin_unlock_irq(&mcast_lock);
-
-	if (p) {
-		/*
-		 * Wait for any list walkers to finish before freeing the
-		 * list element.
-		 */
-		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
-		ipath_mcast_qp_free(p);
-	}
-	if (last) {
-		atomic_dec(&mcast->refcount);
-		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
-		ipath_mcast_free(mcast);
-		spin_lock_irq(&dev->n_mcast_grps_lock);
-		dev->n_mcast_grps_allocated--;
-		spin_unlock_irq(&dev->n_mcast_grps_lock);
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-int ipath_mcast_tree_empty(void)
-{
-	return mcast_tree.rb_node == NULL;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c b/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
deleted file mode 100644
index 1a7e20a..0000000
--- a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on PowerPC only.  Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * Nothing to do on PowerPC, so just return without error.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
-	return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c b/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
deleted file mode 100644
index 7b6e4c8..0000000
--- a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on x86_64 only.  Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include <linux/pci.h>
-#include <asm/processor.h>
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
- * write combining.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
-	int ret = 0;
-	u64 pioaddr, piolen;
-	unsigned bits;
-	const unsigned long addr = pci_resource_start(dd->pcidev, 0);
-	const size_t len = pci_resource_len(dd->pcidev, 0);
-
-	/*
-	 * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
-	 * chip.  Linux (possibly the hardware) requires it to be on a power
-	 * of 2 address matching the length (which has to be a power of 2).
-	 * For rev1, that means the base address, for rev2, it will be just
-	 * the PIO buffers themselves.
-	 * For chips with two sets of buffers, the calculations are
-	 * somewhat more complicated; we need to sum, and the piobufbase
-	 * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
-	 * The buffers are still packed, so a single range covers both.
-	 */
-	if (dd->ipath_piobcnt2k && dd->ipath_piobcnt4k) { /* 2 sizes */
-		unsigned long pio2kbase, pio4kbase;
-		pio2kbase = dd->ipath_piobufbase & 0xffffffffUL;
-		pio4kbase = (dd->ipath_piobufbase >> 32) & 0xffffffffUL;
-		if (pio2kbase < pio4kbase) { /* all, for now */
-			pioaddr = addr + pio2kbase;
-			piolen = pio4kbase - pio2kbase +
-				dd->ipath_piobcnt4k * dd->ipath_4kalign;
-		} else {
-			pioaddr = addr + pio4kbase;
-			piolen = pio2kbase - pio4kbase +
-				dd->ipath_piobcnt2k * dd->ipath_palign;
-		}
-	} else {  /* single buffer size (2K, currently) */
-		pioaddr = addr + dd->ipath_piobufbase;
-		piolen = dd->ipath_piobcnt2k * dd->ipath_palign +
-			dd->ipath_piobcnt4k * dd->ipath_4kalign;
-	}
-
-	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-		/* do nothing */ ;
-
-	if (piolen != (1ULL << bits)) {
-		piolen >>= bits;
-		while (piolen >>= 1)
-			bits++;
-		piolen = 1ULL << (bits + 1);
-	}
-	if (pioaddr & (piolen - 1)) {
-		u64 atmp;
-		ipath_dbg("pioaddr %llx not on right boundary for size "
-			  "%llx, fixing\n",
-			  (unsigned long long) pioaddr,
-			  (unsigned long long) piolen);
-		atmp = pioaddr & ~(piolen - 1);
-		if (atmp < addr || (atmp + piolen) > (addr + len)) {
-			ipath_dev_err(dd, "No way to align address/size "
-				      "(%llx/%llx), no WC mtrr\n",
-				      (unsigned long long) atmp,
-				      (unsigned long long) piolen << 1);
-			ret = -ENODEV;
-		} else {
-			ipath_dbg("changing WC base from %llx to %llx, "
-				  "len from %llx to %llx\n",
-				  (unsigned long long) pioaddr,
-				  (unsigned long long) atmp,
-				  (unsigned long long) piolen,
-				  (unsigned long long) piolen << 1);
-			pioaddr = atmp;
-			piolen <<= 1;
-		}
-	}
-
-	if (!ret) {
-		dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
-		if (dd->wc_cookie < 0) {
-			ipath_dev_err(dd, "Seting mtrr failed on PIO buffers\n");
-			ret = -ENODEV;
-		} else if (dd->wc_cookie == 0)
-			ipath_cdbg(VERBOSE, "Set mtrr for chip to WC not needed\n");
-		else
-			ipath_cdbg(VERBOSE, "Set mtrr for chip to WC\n");
-	}
-
-	return ret;
-}
-
-/**
- * ipath_disable_wc - disable write combining for MMIO writes to the device
- * @dd: infinipath device
- */
-void ipath_disable_wc(struct ipath_devdata *dd)
-{
-	arch_phys_wc_del(dd->wc_cookie);
-}
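The deleted ipath_enable_wc() above rounds the PIO region up to a power-of-two size and aligns its base before registering it for write combining, since MTRR-style ranges must be size-aligned powers of two. A minimal standalone sketch of that rounding, with made-up values (illustration only, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Round a nonzero length up to the next power of two, as the deleted
 * ipath_enable_wc() did before calling arch_phys_wc_add(). */
static uint64_t wc_round_len(uint64_t len)
{
	uint64_t bits = 0;

	while (!(len & (1ULL << bits)))		/* find the lowest set bit */
		bits++;
	if (len != (1ULL << bits)) {		/* not a power of two yet */
		len >>= bits;
		while (len >>= 1)
			bits++;
		len = 1ULL << (bits + 1);
	}
	return len;
}

int main(void)
{
	uint64_t len = 0x6000;			/* 24 KiB of PIO buffers (made up) */
	uint64_t size = wc_round_len(len);	/* -> 0x8000 */
	uint64_t addr = 0xd0003000ULL;		/* hypothetical PIO base address */
	uint64_t base = addr & ~(size - 1);	/* align the base down to the size */

	printf("size=%#llx base=%#llx\n",
	       (unsigned long long)size, (unsigned long long)base);
	return 0;
}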
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index efd6f45..7e8037e 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -1,7 +1,7 @@
 menu "Speakup console speech"
 
 config SPEAKUP
-	depends on VT
+	depends on VT && !MN10300
 	tristate "Speakup core"
 	---help---
 		This is the Speakup screen reader.  Think of it as a
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3327c49..713c63d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -898,7 +898,7 @@
 	da->unmap_zeroes_data = flag;
 	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
 		 da->da_dev, flag);
-	return 0;
+	return count;
 }
 
 /*
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cacd97a..da457e2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -828,6 +828,50 @@
 	return dev;
 }
 
+/*
+ * Check whether the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit (UNMAP/WRITE_SAME in SCSI, TRIM in ATA); if
+ * it does, TPE=1 needs to be set.
+ */
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+				       struct request_queue *q, int block_size)
+{
+	if (!blk_queue_discard(q))
+		return false;
+
+	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
+								block_size;
+	/*
+	 * Currently hardcoded to 1 in Linux/SCSI code.
+	 */
+	attrib->max_unmap_block_desc_count = 1;
+	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
+								block_size;
+	attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
+	return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_queue);
+
+/*
+ * Convert from the block size advertised to the initiator to the 512-byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+	switch (dev->dev_attrib.block_size) {
+	case 4096:
+		return lb << 3;
+	case 2048:
+		return lb << 2;
+	case 1024:
+		return lb << 1;
+	default:
+		return lb;
+	}
+}
+EXPORT_SYMBOL(target_to_linux_sector);
+
 int target_configure_device(struct se_device *dev)
 {
 	struct se_hba *hba = dev->se_hba;
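The new target_configure_unmap_from_queue() above converts the block layer's discard limits into the SCSI unmap attributes: max_discard_sectors is in 512-byte sectors while discard_granularity and discard_alignment are in bytes, so all three are scaled to logical blocks. A small standalone sketch of that arithmetic with made-up numbers (illustration only, not part of this patch):

#include <stdio.h>

int main(void)
{
	unsigned int block_size = 4096;			/* block size exported to the initiator */
	unsigned long long max_discard_sectors = 65536;	/* queue limit, 512-byte sectors (made up) */
	unsigned int discard_granularity = 1 << 20;	/* queue limit, bytes (made up) */
	unsigned int discard_alignment = 0;		/* queue limit, bytes (made up) */

	/* Same scaling as the new helper: sectors -> bytes -> logical blocks */
	unsigned long long max_unmap_lba_count =
		(max_discard_sectors << 9) / block_size;
	unsigned int unmap_granularity = discard_granularity / block_size;
	unsigned int unmap_granularity_alignment = discard_alignment / block_size;

	printf("max_unmap_lba_count=%llu granularity=%u alignment=%u blocks\n",
	       max_unmap_lba_count, unmap_granularity,
	       unmap_granularity_alignment);
	return 0;
}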
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e319570..75f0f08 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -160,25 +160,11 @@
 			" block_device blocks: %llu logical_block_size: %d\n",
 			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
-		/*
-		 * Check if the underlying struct block_device request_queue supports
-		 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-		 * in ATA and we need to set TPE=1
-		 */
-		if (blk_queue_discard(q)) {
-			dev->dev_attrib.max_unmap_lba_count =
-				q->limits.max_discard_sectors;
-			/*
-			 * Currently hardcoded to 1 in Linux/SCSI code..
-			 */
-			dev->dev_attrib.max_unmap_block_desc_count = 1;
-			dev->dev_attrib.unmap_granularity =
-				q->limits.discard_granularity >> 9;
-			dev->dev_attrib.unmap_granularity_alignment =
-				q->limits.discard_alignment;
+
+		if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+						      fd_dev->fd_block_size))
 			pr_debug("IFILE: BLOCK Discard support available,"
-					" disabled by default\n");
-		}
+				 " disabled by default\n");
 		/*
 		 * Enable write same emulation for IBLOCK and use 0xFFFF as
 		 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@
 	if (S_ISBLK(inode->i_mode)) {
 		/* The backend is block device, use discard */
 		struct block_device *bdev = inode->i_bdev;
+		struct se_device *dev = cmd->se_dev;
 
-		ret = blkdev_issue_discard(bdev, lba,
-				nolb, GFP_KERNEL, 0);
+		ret = blkdev_issue_discard(bdev,
+					   target_to_linux_sector(dev, lba),
+					   target_to_linux_sector(dev, nolb),
+					   GFP_KERNEL, 0);
 		if (ret < 0) {
 			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
 				ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a2899f..abe4eb9 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,29 +121,11 @@
 	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
-	/*
-	 * Check if the underlying struct block_device request_queue supports
-	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-	 * in ATA and we need to set TPE=1
-	 */
-	if (blk_queue_discard(q)) {
-		dev->dev_attrib.max_unmap_lba_count =
-				q->limits.max_discard_sectors;
-
-		/*
-		 * Currently hardcoded to 1 in Linux/SCSI code..
-		 */
-		dev->dev_attrib.max_unmap_block_desc_count = 1;
-		dev->dev_attrib.unmap_granularity =
-				q->limits.discard_granularity >> 9;
-		dev->dev_attrib.unmap_granularity_alignment =
-				q->limits.discard_alignment;
-		dev->dev_attrib.unmap_zeroes_data =
-				q->limits.discard_zeroes_data;
-
+	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+					      dev->dev_attrib.hw_block_size))
 		pr_debug("IBLOCK: BLOCK Discard support available,"
-				" disabled by default\n");
-	}
+			 " disabled by default\n");
+
 	/*
 	 * Enable write same emulation for IBLOCK and use 0xFFFF as
 	 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -415,9 +397,13 @@
 iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 {
 	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+	struct se_device *dev = cmd->se_dev;
 	int ret;
 
-	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+	ret = blkdev_issue_discard(bdev,
+				   target_to_linux_sector(dev, lba),
+				   target_to_linux_sector(dev, nolb),
+				   GFP_KERNEL, 0);
 	if (ret < 0) {
 		pr_err("blkdev_issue_discard() failed: %d\n", ret);
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -433,8 +419,10 @@
 	struct scatterlist *sg;
 	struct bio *bio;
 	struct bio_list list;
-	sector_t block_lba = cmd->t_task_lba;
-	sector_t sectors = sbc_get_write_same_sectors(cmd);
+	struct se_device *dev = cmd->se_dev;
+	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+	sector_t sectors = target_to_linux_sector(dev,
+					sbc_get_write_same_sectors(cmd));
 
 	if (cmd->prot_op) {
 		pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -648,12 +636,12 @@
 		  enum dma_data_direction data_direction)
 {
 	struct se_device *dev = cmd->se_dev;
+	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
 	struct iblock_req *ibr;
 	struct bio *bio, *bio_start;
 	struct bio_list list;
 	struct scatterlist *sg;
 	u32 sg_num = sgl_nents;
-	sector_t block_lba;
 	unsigned bio_cnt;
 	int rw = 0;
 	int i;
@@ -679,24 +667,6 @@
 		rw = READ;
 	}
 
-	/*
-	 * Convert the blocksize advertised to the initiator to the 512 byte
-	 * units unconditionally used by the Linux block layer.
-	 */
-	if (dev->dev_attrib.block_size == 4096)
-		block_lba = (cmd->t_task_lba << 3);
-	else if (dev->dev_attrib.block_size == 2048)
-		block_lba = (cmd->t_task_lba << 2);
-	else if (dev->dev_attrib.block_size == 1024)
-		block_lba = (cmd->t_task_lba << 1);
-	else if (dev->dev_attrib.block_size == 512)
-		block_lba = cmd->t_task_lba;
-	else {
-		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-				" %u\n", dev->dev_attrib.block_size);
-		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-	}
-
 	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
 	if (!ibr)
 		goto fail;
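Both backends above now call target_to_linux_sector() instead of open-coding the shift; it scales an LBA expressed in the exported block size to the 512-byte sectors that blkdev_issue_discard() and the bio layer expect. A minimal standalone sketch of the same conversion (illustration only, not part of this patch):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Mirror of target_to_linux_sector(): 4k blocks shift by 3, 2k by 2,
 * 1k by 1, and 512-byte blocks need no scaling. */
static sector_t to_linux_sector(unsigned int block_size, sector_t lba)
{
	switch (block_size) {
	case 4096: return lba << 3;
	case 2048: return lba << 2;
	case 1024: return lba << 1;
	default:   return lba;
	}
}

int main(void)
{
	/* LBA 100 on a 4k-block device begins at 512-byte sector 800 */
	printf("%llu\n", to_linux_sector(4096, 100));
	return 0;
}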
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c..db4412f 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -141,7 +141,6 @@
 int	transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
 int	transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
 int	transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool	target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 void	transport_clear_lun_ref(struct se_lun *);
 void	transport_send_task_abort(struct se_cmd *);
 sense_reason_t	target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fcdcb11..82a663b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -68,23 +68,25 @@
 
 	if (dev) {
 		spin_lock_irqsave(&dev->se_tmr_lock, flags);
-		list_del(&tmr->tmr_list);
+		list_del_init(&tmr->tmr_list);
 		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 	}
 
 	kfree(tmr);
 }
 
-static void core_tmr_handle_tas_abort(
-	struct se_node_acl *tmr_nacl,
-	struct se_cmd *cmd,
-	int tas)
+static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 {
-	bool remove = true;
+	unsigned long flags;
+	bool remove = true, send_tas;
 	/*
 	 * TASK ABORTED status (TAS) bit support
 	 */
-	if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	send_tas = (cmd->transport_state & CMD_T_TAS);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	if (send_tas) {
 		remove = false;
 		transport_send_task_abort(cmd);
 	}
@@ -107,6 +109,46 @@
 	return 1;
 }
 
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+				    struct se_session *tmr_sess, int tas)
+{
+	struct se_session *sess = se_cmd->se_sess;
+
+	assert_spin_locked(&sess->sess_cmd_lock);
+	WARN_ON_ONCE(!irqs_disabled());
+	/*
+	 * If the command has already reached CMD_T_COMPLETE state within
+	 * target_complete_cmd(), or has CMD_T_FABRIC_STOP set due to shutdown,
+	 * this se_cmd has been handed off to the fabric driver and will not
+	 * be aborted.
+	 *
+	 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+	 * ABORT_TASK + LUN_RESET CMD_T_ABORTED processing, as long as
+	 * se_cmd->cmd_kref has not already dropped to zero.
+	 */
+	spin_lock(&se_cmd->t_state_lock);
+	if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+		pr_debug("Attempted to abort io tag: %llu already complete or"
+			" fabric stop, skipping\n", se_cmd->tag);
+		spin_unlock(&se_cmd->t_state_lock);
+		return false;
+	}
+	if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+		pr_debug("Attempted to abort io tag: %llu already shutdown,"
+			" skipping\n", se_cmd->tag);
+		spin_unlock(&se_cmd->t_state_lock);
+		return false;
+	}
+	se_cmd->transport_state |= CMD_T_ABORTED;
+
+	if ((tmr_sess != se_cmd->se_sess) && tas)
+		se_cmd->transport_state |= CMD_T_TAS;
+
+	spin_unlock(&se_cmd->t_state_lock);
+
+	return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
+
 void core_tmr_abort_task(
 	struct se_device *dev,
 	struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
 
-		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
-			continue;
-
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
-		spin_lock(&se_cmd->t_state_lock);
-		if (se_cmd->transport_state & CMD_T_COMPLETE) {
-			printk("ABORT_TASK: ref_tag: %llu already complete,"
-			       " skipping\n", ref_tag);
-			spin_unlock(&se_cmd->t_state_lock);
+		if (!__target_check_io_state(se_cmd, se_sess, 0)) {
 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
 			target_put_sess_cmd(se_cmd);
-
 			goto out;
 		}
-		se_cmd->transport_state |= CMD_T_ABORTED;
-		spin_unlock(&se_cmd->t_state_lock);
-
 		list_del_init(&se_cmd->se_cmd_list);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 		cancel_work_sync(&se_cmd->work);
 		transport_wait_for_tasks(se_cmd);
 
-		target_put_sess_cmd(se_cmd);
 		transport_cmd_finish_abort(se_cmd, true);
+		target_put_sess_cmd(se_cmd);
 
 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
 				" ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@
 	struct list_head *preempt_and_abort_list)
 {
 	LIST_HEAD(drain_tmr_list);
+	struct se_session *sess;
 	struct se_tmr_req *tmr_p, *tmr_pp;
 	struct se_cmd *cmd;
 	unsigned long flags;
+	bool rc;
 	/*
 	 * Release all pending and outgoing TMRs aside from the received
 	 * LUN_RESET tmr..
@@ -206,17 +238,39 @@
 		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
 			continue;
 
+		sess = cmd->se_sess;
+		if (WARN_ON_ONCE(!sess))
+			continue;
+
+		spin_lock(&sess->sess_cmd_lock);
 		spin_lock(&cmd->t_state_lock);
-		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+		if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+		     (cmd->transport_state & CMD_T_FABRIC_STOP)) {
 			spin_unlock(&cmd->t_state_lock);
+			spin_unlock(&sess->sess_cmd_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
 			spin_unlock(&cmd->t_state_lock);
+			spin_unlock(&sess->sess_cmd_lock);
 			continue;
 		}
+		if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+			spin_unlock(&cmd->t_state_lock);
+			spin_unlock(&sess->sess_cmd_lock);
+			continue;
+		}
+		cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock(&cmd->t_state_lock);
 
+		rc = kref_get_unless_zero(&cmd->cmd_kref);
+		if (!rc) {
+			printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
+			spin_unlock(&sess->sess_cmd_lock);
+			continue;
+		}
+		spin_unlock(&sess->sess_cmd_lock);
+
 		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
 	}
 	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@
 			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
 			tmr_p->function, tmr_p->response, cmd->t_state);
 
+		cancel_work_sync(&cmd->work);
+		transport_wait_for_tasks(cmd);
+
 		transport_cmd_finish_abort(cmd, 1);
+		target_put_sess_cmd(cmd);
 	}
 }
 
 static void core_tmr_drain_state_list(
 	struct se_device *dev,
 	struct se_cmd *prout_cmd,
-	struct se_node_acl *tmr_nacl,
+	struct se_session *tmr_sess,
 	int tas,
 	struct list_head *preempt_and_abort_list)
 {
 	LIST_HEAD(drain_task_list);
+	struct se_session *sess;
 	struct se_cmd *cmd, *next;
 	unsigned long flags;
+	int rc;
 
 	/*
 	 * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@
 		if (prout_cmd == cmd)
 			continue;
 
+		sess = cmd->se_sess;
+		if (WARN_ON_ONCE(!sess))
+			continue;
+
+		spin_lock(&sess->sess_cmd_lock);
+		rc = __target_check_io_state(cmd, tmr_sess, tas);
+		spin_unlock(&sess->sess_cmd_lock);
+		if (!rc)
+			continue;
+
 		list_move_tail(&cmd->state_list, &drain_task_list);
 		cmd->state_active = false;
 	}
@@ -289,7 +359,7 @@
 
 	while (!list_empty(&drain_task_list)) {
 		cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
-		list_del(&cmd->state_list);
+		list_del_init(&cmd->state_list);
 
 		pr_debug("LUN_RESET: %s cmd: %p"
 			" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@
 		 * loop above, but we do it down here given that
 		 * cancel_work_sync may block.
 		 */
-		if (cmd->t_state == TRANSPORT_COMPLETE)
-			cancel_work_sync(&cmd->work);
+		cancel_work_sync(&cmd->work);
+		transport_wait_for_tasks(cmd);
 
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		target_stop_cmd(cmd, &flags);
-
-		cmd->transport_state |= CMD_T_ABORTED;
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
+		core_tmr_handle_tas_abort(cmd, tas);
+		target_put_sess_cmd(cmd);
 	}
 }
 
@@ -334,6 +399,7 @@
 {
 	struct se_node_acl *tmr_nacl = NULL;
 	struct se_portal_group *tmr_tpg = NULL;
+	struct se_session *tmr_sess = NULL;
 	int tas;
         /*
 	 * TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@
 	 * or struct se_device passthrough..
 	 */
 	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
-		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
-		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		tmr_sess = tmr->task_cmd->se_sess;
+		tmr_nacl = tmr_sess->se_node_acl;
+		tmr_tpg = tmr_sess->se_tpg;
 		if (tmr_nacl && tmr_tpg) {
 			pr_debug("LUN_RESET: TMR caller fabric: %s"
 				" initiator port %s\n",
@@ -366,7 +433,7 @@
 		dev->transport->name, tas);
 
 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
-	core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
+	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
 				preempt_and_abort_list);
 
 	/*
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9f3608e..867bc6d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -534,9 +534,6 @@
 }
 EXPORT_SYMBOL(transport_deregister_session);
 
-/*
- * Called with cmd->t_state_lock held.
- */
 static void target_remove_from_state_list(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -561,10 +558,6 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (write_pending)
-		cmd->t_state = TRANSPORT_WRITE_PENDING;
-
 	if (remove_from_lists) {
 		target_remove_from_state_list(cmd);
 
@@ -574,6 +567,10 @@
 		cmd->se_lun = NULL;
 	}
 
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (write_pending)
+		cmd->t_state = TRANSPORT_WRITE_PENDING;
+
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
@@ -627,6 +624,8 @@
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
+	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+
 	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
 		transport_lun_remove_cmd(cmd);
 	/*
@@ -638,7 +637,7 @@
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
-	if (remove)
+	if (remove && ack_kref)
 		transport_put_cmd(cmd);
 }
 
@@ -694,19 +693,10 @@
 	}
 
 	/*
-	 * See if we are waiting to complete for an exception condition.
-	 */
-	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&cmd->task_stop_comp);
-		return;
-	}
-
-	/*
 	 * Check for case where an explicit ABORT_TASK has been received
 	 * and transport_wait_for_tasks() will be waiting for completion..
 	 */
-	if (cmd->transport_state & CMD_T_ABORTED &&
+	if (cmd->transport_state & CMD_T_ABORTED ||
 	    cmd->transport_state & CMD_T_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		complete_all(&cmd->t_transport_stop_comp);
@@ -721,10 +711,10 @@
 	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (cmd->cpuid == -1)
-		queue_work(target_completion_wq, &cmd->work);
-	else
+	if (cmd->se_cmd_flags & SCF_USE_CPUID)
 		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+	else
+		queue_work(target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
@@ -1203,7 +1193,6 @@
 	INIT_LIST_HEAD(&cmd->state_list);
 	init_completion(&cmd->t_transport_stop_comp);
 	init_completion(&cmd->cmd_wait_comp);
-	init_completion(&cmd->task_stop_comp);
 	spin_lock_init(&cmd->t_state_lock);
 	kref_init(&cmd->cmd_kref);
 	cmd->transport_state = CMD_T_DEV_ACTIVE;
@@ -1437,6 +1426,12 @@
 	 */
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
 				data_length, data_dir, task_attr, sense);
+
+	if (flags & TARGET_SCF_USE_CPUID)
+		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
+	else
+		se_cmd->cpuid = WORK_CPU_UNBOUND;
+
 	if (flags & TARGET_SCF_UNKNOWN_SIZE)
 		se_cmd->unknown_data_length = 1;
 	/*
@@ -1635,33 +1630,6 @@
 EXPORT_SYMBOL(target_submit_tmr);
 
 /*
- * If the cmd is active, request it to be stopped and sleep until it
- * has completed.
- */
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
-	__releases(&cmd->t_state_lock)
-	__acquires(&cmd->t_state_lock)
-{
-	bool was_active = false;
-
-	if (cmd->transport_state & CMD_T_BUSY) {
-		cmd->transport_state |= CMD_T_REQUEST_STOP;
-		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
-		pr_debug("cmd %p waiting to complete\n", cmd);
-		wait_for_completion(&cmd->task_stop_comp);
-		pr_debug("cmd %p stopped successfully\n", cmd);
-
-		spin_lock_irqsave(&cmd->t_state_lock, *flags);
-		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
-		cmd->transport_state &= ~CMD_T_BUSY;
-		was_active = true;
-	}
-
-	return was_active;
-}
-
-/*
  * Handle SAM-esque emulation for generic transport request failures.
  */
 void transport_generic_request_failure(struct se_cmd *cmd,
@@ -1859,19 +1827,21 @@
 	return true;
 }
 
+static int __transport_check_aborted_status(struct se_cmd *, int);
+
 void target_execute_cmd(struct se_cmd *cmd)
 {
 	/*
-	 * If the received CDB has aleady been aborted stop processing it here.
-	 */
-	if (transport_check_aborted_status(cmd, 1))
-		return;
-
-	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
+	 *
+	 * If the received CDB has already been aborted, stop processing it here.
 	 */
 	spin_lock_irq(&cmd->t_state_lock);
+	if (__transport_check_aborted_status(cmd, 1)) {
+		spin_unlock_irq(&cmd->t_state_lock);
+		return;
+	}
 	if (cmd->transport_state & CMD_T_STOP) {
 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
 			__func__, __LINE__, cmd->tag);
@@ -2222,28 +2192,6 @@
 }
 
 /**
- * transport_release_cmd - free a command
- * @cmd:       command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-static int transport_release_cmd(struct se_cmd *cmd)
-{
-	BUG_ON(!cmd->se_tfo);
-
-	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-		core_tmr_release_req(cmd->se_tmr_req);
-	if (cmd->t_task_cdb != cmd->__t_task_cdb)
-		kfree(cmd->t_task_cdb);
-	/*
-	 * If this cmd has been setup with target_get_sess_cmd(), drop
-	 * the kref and call ->release_cmd() in kref callback.
-	 */
-	return target_put_sess_cmd(cmd);
-}
-
-/**
  * transport_put_cmd - release a reference to a command
  * @cmd:       command to release
  *
@@ -2251,8 +2199,12 @@
  */
 static int transport_put_cmd(struct se_cmd *cmd)
 {
-	transport_free_pages(cmd);
-	return transport_release_cmd(cmd);
+	BUG_ON(!cmd->se_tfo);
+	/*
+	 * If this cmd has been setup with target_get_sess_cmd(), drop
+	 * the kref and call ->release_cmd() in kref callback.
+	 */
+	return target_put_sess_cmd(cmd);
 }
 
 void *transport_kmap_data_sg(struct se_cmd *cmd)
@@ -2450,34 +2402,58 @@
 	}
 }
 
-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+			   unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
 {
 	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+{
 	int ret = 0;
+	bool aborted = false, tas = false;
 
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
 		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
-			 transport_wait_for_tasks(cmd);
+			target_wait_free_cmd(cmd, &aborted, &tas);
 
-		ret = transport_release_cmd(cmd);
+		if (!aborted || tas)
+			ret = transport_put_cmd(cmd);
 	} else {
 		if (wait_for_tasks)
-			transport_wait_for_tasks(cmd);
+			target_wait_free_cmd(cmd, &aborted, &tas);
 		/*
 		 * Handle WRITE failure case where transport_generic_new_cmd()
 		 * has already added se_cmd to state_list, but fabric has
 		 * failed command before I/O submission.
 		 */
-		if (cmd->state_active) {
-			spin_lock_irqsave(&cmd->t_state_lock, flags);
+		if (cmd->state_active)
 			target_remove_from_state_list(cmd);
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		}
 
 		if (cmd->se_lun)
 			transport_lun_remove_cmd(cmd);
 
-		ret = transport_put_cmd(cmd);
+		if (!aborted || tas)
+			ret = transport_put_cmd(cmd);
+	}
+	/*
+	 * If the task has been internally aborted due to TMR ABORT_TASK
+	 * or LUN_RESET, target_core_tmr.c is responsible for performing
+	 * the remaining calls to target_put_sess_cmd(), and not the
+	 * callers of this function.
+	 */
+	if (aborted) {
+		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+		wait_for_completion(&cmd->cmd_wait_comp);
+		cmd->se_tfo->release_cmd(cmd);
+		ret = 1;
 	}
 	return ret;
 }
@@ -2517,26 +2493,46 @@
 }
 EXPORT_SYMBOL(target_get_sess_cmd);
 
+static void target_free_cmd_mem(struct se_cmd *cmd)
+{
+	transport_free_pages(cmd);
+
+	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+		core_tmr_release_req(cmd->se_tmr_req);
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+		kfree(cmd->t_task_cdb);
+}
+
 static void target_release_cmd_kref(struct kref *kref)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
 	unsigned long flags;
+	bool fabric_stop;
 
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		target_free_cmd_mem(se_cmd);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
-	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+
+	spin_lock(&se_cmd->t_state_lock);
+	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+	spin_unlock(&se_cmd->t_state_lock);
+
+	if (se_cmd->cmd_wait_set || fabric_stop) {
+		list_del_init(&se_cmd->se_cmd_list);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		target_free_cmd_mem(se_cmd);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
-	list_del(&se_cmd->se_cmd_list);
+	list_del_init(&se_cmd->se_cmd_list);
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
+	target_free_cmd_mem(se_cmd);
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
 
@@ -2548,6 +2544,7 @@
 	struct se_session *se_sess = se_cmd->se_sess;
 
 	if (!se_sess) {
+		target_free_cmd_mem(se_cmd);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return 1;
 	}
@@ -2564,6 +2561,7 @@
 {
 	struct se_cmd *se_cmd;
 	unsigned long flags;
+	int rc;
 
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (se_sess->sess_tearing_down) {
@@ -2573,8 +2571,15 @@
 	se_sess->sess_tearing_down = 1;
 	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
 
-	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
-		se_cmd->cmd_wait_set = 1;
+	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+		rc = kref_get_unless_zero(&se_cmd->cmd_kref);
+		if (rc) {
+			se_cmd->cmd_wait_set = 1;
+			spin_lock(&se_cmd->t_state_lock);
+			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+			spin_unlock(&se_cmd->t_state_lock);
+		}
+	}
 
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 }
@@ -2587,15 +2592,25 @@
 {
 	struct se_cmd *se_cmd, *tmp_cmd;
 	unsigned long flags;
+	bool tas;
 
 	list_for_each_entry_safe(se_cmd, tmp_cmd,
 				&se_sess->sess_wait_list, se_cmd_list) {
-		list_del(&se_cmd->se_cmd_list);
+		list_del_init(&se_cmd->se_cmd_list);
 
 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
 			" %d\n", se_cmd, se_cmd->t_state,
 			se_cmd->se_tfo->get_cmd_state(se_cmd));
 
+		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+		tas = (se_cmd->transport_state & CMD_T_TAS);
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+		if (!target_put_sess_cmd(se_cmd)) {
+			if (tas)
+				target_put_sess_cmd(se_cmd);
+		}
+
 		wait_for_completion(&se_cmd->cmd_wait_comp);
 		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
 			" fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2617,6 +2632,58 @@
 	wait_for_completion(&lun->lun_ref_comp);
 }
 
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+			   bool *aborted, bool *tas, unsigned long *flags)
+	__releases(&cmd->t_state_lock)
+	__acquires(&cmd->t_state_lock)
+{
+
+	assert_spin_locked(&cmd->t_state_lock);
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (fabric_stop)
+		cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+	if (cmd->transport_state & CMD_T_ABORTED)
+		*aborted = true;
+
+	if (cmd->transport_state & CMD_T_TAS)
+		*tas = true;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+		return false;
+
+	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+		return false;
+
+	if (!(cmd->transport_state & CMD_T_ACTIVE))
+		return false;
+
+	if (fabric_stop && *aborted)
+		return false;
+
+	cmd->transport_state |= CMD_T_STOP;
+
+	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
+		 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
+		 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+
+	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+	wait_for_completion(&cmd->t_transport_stop_comp);
+
+	spin_lock_irqsave(&cmd->t_state_lock, *flags);
+	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+
+	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
+
+	return true;
+}
+
 /**
  * transport_wait_for_tasks - wait for completion to occur
  * @cmd:	command to wait
@@ -2627,43 +2694,13 @@
 bool transport_wait_for_tasks(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	bool ret, aborted = false, tas = false;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return false;
-	}
-
-	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return false;
-	}
-
-	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return false;
-	}
-
-	cmd->transport_state |= CMD_T_STOP;
-
-	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
-		cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-
+	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	wait_for_completion(&cmd->t_transport_stop_comp);
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
-
-	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
-		cmd->tag);
-
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	return true;
+	return ret;
 }
 EXPORT_SYMBOL(transport_wait_for_tasks);
 
@@ -2845,28 +2882,49 @@
 }
 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+	__releases(&cmd->t_state_lock)
+	__acquires(&cmd->t_state_lock)
 {
+	assert_spin_locked(&cmd->t_state_lock);
+	WARN_ON_ONCE(!irqs_disabled());
+
 	if (!(cmd->transport_state & CMD_T_ABORTED))
 		return 0;
-
 	/*
 	 * If cmd has been aborted but either no status is to be sent or it has
 	 * already been sent, just return
 	 */
-	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
+		if (send_status)
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
 		return 1;
+	}
 
-	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
-		 cmd->t_task_cdb[0], cmd->tag);
+	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
+		" 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
 
 	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 	trace_target_cmd_complete(cmd);
+
+	spin_unlock_irq(&cmd->t_state_lock);
 	cmd->se_tfo->queue_status(cmd);
+	spin_lock_irq(&cmd->t_state_lock);
 
 	return 1;
 }
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+	int ret;
+
+	spin_lock_irq(&cmd->t_state_lock);
+	ret = __transport_check_aborted_status(cmd, send_status);
+	spin_unlock_irq(&cmd->t_state_lock);
+
+	return ret;
+}
 EXPORT_SYMBOL(transport_check_aborted_status);
 
 void transport_send_task_abort(struct se_cmd *cmd)
@@ -2888,11 +2946,17 @@
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-			cmd->transport_state |= CMD_T_ABORTED;
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
+				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+				goto send_abort;
+			}
 			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			return;
 		}
 	}
+send_abort:
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 
 	transport_lun_remove_cmd(cmd);
@@ -2909,8 +2973,17 @@
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		tmr->response = TMR_FUNCTION_REJECTED;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		goto check_stop;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
 	switch (tmr->function) {
 	case TMR_ABORT_TASK:
 		core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2943,9 +3016,17 @@
 		break;
 	}
 
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		goto check_stop;
+	}
 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
 	cmd->se_tfo->queue_tm_rsp(cmd);
 
+check_stop:
 	transport_cmd_check_stop_to_fabric(cmd);
 }
 
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index dd600e5..94f5154 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -903,7 +903,7 @@
 	info->version = __stringify(TCMU_MAILBOX_VERSION);
 
 	info->mem[0].name = "tcm-user command & data buffer";
-	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
+	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
 	info->mem[0].size = TCMU_RING_SIZE;
 	info->mem[0].memtype = UIO_MEM_VIRTUAL;
 
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8cc4ac6..7c92c09 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -195,7 +195,7 @@
 	  passive trip is crossed.
 
 config SPEAR_THERMAL
-	bool "SPEAr thermal sensor driver"
+	tristate "SPEAr thermal sensor driver"
 	depends on PLAT_SPEAR || COMPILE_TEST
 	depends on OF
 	help
@@ -237,8 +237,8 @@
 	  framework.
 
 config DB8500_THERMAL
-	bool "DB8500 thermal management"
-	depends on ARCH_U8500
+	tristate "DB8500 thermal management"
+	depends on MFD_DB8500_PRCMU
 	default y
 	help
 	  Adds DB8500 thermal management implementation according to the thermal
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index e3fbc5a..6ceac4f 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -377,26 +377,28 @@
  * get_load() - get load for a cpu since last updated
  * @cpufreq_device:	&struct cpufreq_cooling_device for this cpu
  * @cpu:	cpu number
+ * @cpu_idx:	index of the cpu in cpufreq_device->allowed_cpus
  *
  * Return: The average load of cpu @cpu in percentage since this
  * function was last called.
  */
-static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
+		    int cpu_idx)
 {
 	u32 load;
 	u64 now, now_idle, delta_time, delta_idle;
 
 	now_idle = get_cpu_idle_time(cpu, &now, 0);
-	delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
-	delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
+	delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
+	delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
 
 	if (delta_time <= delta_idle)
 		load = 0;
 	else
 		load = div64_u64(100 * (delta_time - delta_idle), delta_time);
 
-	cpufreq_device->time_in_idle[cpu] = now_idle;
-	cpufreq_device->time_in_idle_timestamp[cpu] = now;
+	cpufreq_device->time_in_idle[cpu_idx] = now_idle;
+	cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
 
 	return load;
 }
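get_load() above derives a percentage load from two deltas measured since the previous call: total wall time and idle time. The fix indexes the per-CPU bookkeeping by the position in allowed_cpus rather than by the raw CPU number. A standalone sketch of the load formula itself, with made-up deltas (illustration only, not part of this patch):

#include <stdio.h>

int main(void)
{
	unsigned long long delta_time = 100000;	/* wall time since last sample (made up) */
	unsigned long long delta_idle = 25000;	/* idle time in that window (made up) */
	unsigned int load;

	if (delta_time <= delta_idle)
		load = 0;
	else
		load = (unsigned int)(100 * (delta_time - delta_idle) / delta_time);

	printf("load=%u%%\n", load);		/* prints load=75% */
	return 0;
}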
@@ -598,7 +600,7 @@
 		u32 load;
 
 		if (cpu_online(cpu))
-			load = get_load(cpufreq_device, cpu);
+			load = get_load(cpufreq_device, cpu, i);
 		else
 			load = 0;
 
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index be4eedc..9043f8f 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -475,14 +475,10 @@
 
 	sensor_np = of_node_get(dev->of_node);
 
-	for_each_child_of_node(np, child) {
+	for_each_available_child_of_node(np, child) {
 		struct of_phandle_args sensor_specs;
 		int ret, id;
 
-		/* Check whether child is enabled or not */
-		if (!of_device_is_available(child))
-			continue;
-
 		/* For now, thermal framework supports only 1 sensor per zone */
 		ret = of_parse_phandle_with_args(child, "thermal-sensors",
 						 "#thermal-sensor-cells",
@@ -881,16 +877,12 @@
 		return 0; /* Run successfully on systems without thermal DT */
 	}
 
-	for_each_child_of_node(np, child) {
+	for_each_available_child_of_node(np, child) {
 		struct thermal_zone_device *zone;
 		struct thermal_zone_params *tzp;
 		int i, mask = 0;
 		u32 prop;
 
-		/* Check whether child is enabled or not */
-		if (!of_device_is_available(child))
-			continue;
-
 		tz = thermal_of_build_thermal_zone(child);
 		if (IS_ERR(tz)) {
 			pr_err("failed to build thermal zone %s: %ld\n",
@@ -968,13 +960,9 @@
 		return;
 	}
 
-	for_each_child_of_node(np, child) {
+	for_each_available_child_of_node(np, child) {
 		struct thermal_zone_device *zone;
 
-		/* Check whether child is enabled or not */
-		if (!of_device_is_available(child))
-			continue;
-
 		zone = thermal_zone_get_zone_by_name(child->name);
 		if (IS_ERR(zone))
 			continue;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 44b9c48..0e735ac 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reboot.h>
@@ -75,8 +76,10 @@
 #define rcar_has_irq_support(priv)	((priv)->common->base)
 #define rcar_id_to_shift(priv)		((priv)->id * 8)
 
+#define USE_OF_THERMAL	1
 static const struct of_device_id rcar_thermal_dt_ids[] = {
 	{ .compatible = "renesas,rcar-thermal", },
+	{ .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL },
 	{},
 };
 MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
@@ -200,9 +203,9 @@
 	return ret;
 }
 
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
+					 int *temp)
 {
-	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
 	int tmp;
 	int ret;
 
@@ -226,6 +229,20 @@
 	return 0;
 }
 
+static int rcar_thermal_of_get_temp(void *data, int *temp)
+{
+	struct rcar_thermal_priv *priv = data;
+
+	return rcar_thermal_get_current_temp(priv, temp);
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+{
+	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+	return rcar_thermal_get_current_temp(priv, temp);
+}
+
 static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
 				      int trip, enum thermal_trip_type *type)
 {
@@ -282,6 +299,10 @@
 	return 0;
 }
 
+static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
+	.get_temp	= rcar_thermal_of_get_temp,
+};
+
 static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
 	.get_temp	= rcar_thermal_get_temp,
 	.get_trip_type	= rcar_thermal_get_trip_type,
@@ -318,14 +339,20 @@
 
 	priv = container_of(work, struct rcar_thermal_priv, work.work);
 
-	rcar_thermal_get_temp(priv->zone, &cctemp);
+	ret = rcar_thermal_get_current_temp(priv, &cctemp);
+	if (ret < 0)
+		return;
+
 	ret = rcar_thermal_update_temp(priv);
 	if (ret < 0)
 		return;
 
 	rcar_thermal_irq_enable(priv);
 
-	rcar_thermal_get_temp(priv->zone, &nctemp);
+	ret = rcar_thermal_get_current_temp(priv, &nctemp);
+	if (ret < 0)
+		return;
+
 	if (nctemp != cctemp)
 		thermal_zone_device_update(priv->zone);
 }
@@ -403,6 +430,8 @@
 	struct rcar_thermal_priv *priv;
 	struct device *dev = &pdev->dev;
 	struct resource *res, *irq;
+	const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
+	unsigned long of_data = (unsigned long)of_id->data;
 	int mres = 0;
 	int i;
 	int ret = -ENODEV;
@@ -463,7 +492,13 @@
 		if (ret < 0)
 			goto error_unregister;
 
-		priv->zone = thermal_zone_device_register("rcar_thermal",
+		if (of_data == USE_OF_THERMAL)
+			priv->zone = thermal_zone_of_sensor_register(
+						dev, i, priv,
+						&rcar_thermal_zone_of_ops);
+		else
+			priv->zone = thermal_zone_device_register(
+						"rcar_thermal",
 						1, 0, priv,
 						&rcar_thermal_zone_ops, NULL, 0,
 						idle);
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 534dd91..81b35aa 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -54,8 +54,7 @@
 	.get_temp = thermal_get_temp,
 };
 
-#ifdef CONFIG_PM
-static int spear_thermal_suspend(struct device *dev)
+static int __maybe_unused spear_thermal_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@
 	return 0;
 }
 
-static int spear_thermal_resume(struct device *dev)
+static int __maybe_unused spear_thermal_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@
 
 	return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
 		spear_thermal_resume);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index b311004..2348fa6 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -681,7 +681,14 @@
 /* this is called once with whichever end is closed last */
 static void pty_unix98_shutdown(struct tty_struct *tty)
 {
-	devpts_kill_index(tty->driver_data, tty->index);
+	struct inode *ptmx_inode;
+
+	if (tty->driver->subtype == PTY_TYPE_MASTER)
+		ptmx_inode = tty->driver_data;
+	else
+		ptmx_inode = tty->link->driver_data;
+	devpts_kill_index(ptmx_inode, tty->index);
+	devpts_del_ref(ptmx_inode);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@
 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
 	tty->driver_data = inode;
 
+	/*
+	 * If all references to the ptmx inode are dropped while /dev/tty is
+	 * still open and pointing to the master/slave pair (ptmx is
+	 * closed/released before /dev/tty), we must make sure that the inode
+	 * is still valid when the final pty_unix98_shutdown is called, so we
+	 * hold an additional reference to the ptmx inode. For the same
+	 * /dev/tty last-close case, we also need to make sure the super_block
+	 * is not destroyed (devpts instance unmounted) before /dev/tty is
+	 * closed and devpts_kill_index is called on its release.
+	 */
+	devpts_add_ref(inode);
+
 	tty_add_file(tty, filp);
 
 	slave_inode = devpts_pty_new(inode,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e71ec78..7cd6f9a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1941,6 +1941,7 @@
 #define PCIE_VENDOR_ID_WCH		0x1c00
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P	0x3250
 #define PCIE_DEVICE_ID_WCH_CH384_4S	0x3470
+#define PCIE_DEVICE_ID_WCH_CH382_2S	0x3253
 
 #define PCI_VENDOR_ID_PERICOM			0x12D8
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7951	0x7951
@@ -2637,6 +2638,14 @@
 		.subdevice	= PCI_ANY_ID,
 		.setup		= pci_wch_ch353_setup,
 	},
+	/* WCH CH382 2S card (16850 clone) */
+	{
+		.vendor         = PCIE_VENDOR_ID_WCH,
+		.device         = PCIE_DEVICE_ID_WCH_CH382_2S,
+		.subvendor      = PCI_ANY_ID,
+		.subdevice      = PCI_ANY_ID,
+		.setup          = pci_wch_ch38x_setup,
+	},
 	/* WCH CH382 2S1P card (16850 clone) */
 	{
 		.vendor         = PCIE_VENDOR_ID_WCH,
@@ -2955,6 +2964,7 @@
 	pbn_fintek_4,
 	pbn_fintek_8,
 	pbn_fintek_12,
+	pbn_wch382_2,
 	pbn_wch384_4,
 	pbn_pericom_PI7C9X7951,
 	pbn_pericom_PI7C9X7952,
@@ -3775,6 +3785,13 @@
 		.base_baud	= 115200,
 		.first_offset	= 0x40,
 	},
+	[pbn_wch382_2] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 2,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+		.first_offset	= 0xC0,
+	},
 	[pbn_wch384_4] = {
 		.flags		= FL_BASE0,
 		.num_ports	= 4,
@@ -5574,6 +5591,10 @@
 		PCI_ANY_ID, PCI_ANY_ID,
 		0, 0, pbn_b0_bt_2_115200 },
 
+	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0, pbn_wch382_2 },
+
 	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
 		PCI_ANY_ID, PCI_ANY_ID,
 		0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index b645f92..fa49eb1 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1165,7 +1165,7 @@
 
 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
 
-static void wait_for_xmitr(struct uart_omap_port *up)
+static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
 {
 	unsigned int status, tmout = 10000;
 
@@ -1343,7 +1343,7 @@
 
 /* Enable or disable the rs485 support */
 static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
 {
 	struct uart_omap_port *up = to_uart_omap_port(port);
 	unsigned int mode;
@@ -1356,8 +1356,12 @@
 	up->ier = 0;
 	serial_out(up, UART_IER, 0);
 
+	/* Clamp the delays to [0, 100ms] */
+	rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+	rs485->delay_rts_after_send  = min(rs485->delay_rts_after_send, 100U);
+
 	/* store new config */
-	port->rs485 = *rs485conf;
+	port->rs485 = *rs485;
 
 	/*
 	 * Just as a precaution, only allow rs485
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 5cec01c..a7eacef 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2066,13 +2066,12 @@
 		if (tty) {
 			mutex_unlock(&tty_mutex);
 			retval = tty_lock_interruptible(tty);
+			tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
 			if (retval) {
 				if (retval == -EINTR)
 					retval = -ERESTARTSYS;
 				goto err_unref;
 			}
-			/* safe to drop the kref from tty_driver_lookup_tty() */
-			tty_kref_put(tty);
 			retval = tty_reopen(tty);
 			if (retval < 0) {
 				tty_unlock(tty);
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index d2f3c4c..dfa9ec0 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -21,10 +21,15 @@
 
 int tty_lock_interruptible(struct tty_struct *tty)
 {
+	int ret;
+
 	if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
 		return -EIO;
 	tty_kref_get(tty);
-	return mutex_lock_interruptible(&tty->legacy_mutex);
+	ret = mutex_lock_interruptible(&tty->legacy_mutex);
+	if (ret)
+		tty_kref_put(tty);
+	return ret;
 }
 
 void __lockfunc tty_unlock(struct tty_struct *tty)
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 39a0fa8..e991d55 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -572,12 +572,6 @@
 	set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
 	clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
 
-	/*
-	 * If the force mode bit is already set, don't set it.
-	 */
-	if ((gusbcfg & set) && !(gusbcfg & clear))
-		return false;
-
 	gusbcfg &= ~clear;
 	gusbcfg |= set;
 	dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
@@ -3278,9 +3272,6 @@
 /**
  * During device initialization, read various hardware configuration
  * registers and interpret the contents.
- *
- * This should be called during driver probe. It will perform a core
- * soft reset in order to get the reset values of the parameters.
  */
 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
 {
@@ -3288,7 +3279,6 @@
 	unsigned width;
 	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
 	u32 grxfsiz;
-	int retval;
 
 	/*
 	 * Attempt to ensure this device is really a DWC_otg Controller.
@@ -3308,10 +3298,6 @@
 		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
 		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
 
-	retval = dwc2_core_reset(hsotg);
-	if (retval)
-		return retval;
-
 	hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
 	hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
 	hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 510f787..690b9fd 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -530,7 +530,13 @@
 	if (retval)
 		return retval;
 
-	/* Reset the controller and detect hardware config values */
+	/*
+	 * Reset the controller before dwc2_get_hwparams() so it reads the
+	 * power-on reset values from the registers.
+	 */
+	dwc2_core_reset_and_force_dr_mode(hsotg);
+
+	/* Detect config values from hardware */
 	retval = dwc2_get_hwparams(hsotg);
 	if (retval)
 		goto error;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index af023a8..7d1dd82 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2789,6 +2789,7 @@
 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
 	dwc->gadget.sg_supported	= true;
 	dwc->gadget.name		= "dwc3-gadget";
+	dwc->gadget.is_otg		= dwc->dr_mode == USB_DR_MODE_OTG;
 
 	/*
 	 * FIXME We might be setting max_speed to <SUPER, however versions
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 04ce6b1..e0244fb 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -112,12 +112,16 @@
 	offset = start;
 	if (!start || start == XHCI_HCC_PARAMS_OFFSET) {
 		val = readl(base + XHCI_HCC_PARAMS_OFFSET);
+		if (val == ~0)
+			return 0;
 		offset = XHCI_HCC_EXT_CAPS(val) << 2;
 		if (!offset)
 			return 0;
 	};
 	do {
 		val = readl(base + offset);
+		if (val == ~0)
+			return 0;
 		if (XHCI_EXT_CAPS_ID(val) == id && offset != start)
 			return offset;
 
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index c30de7c..73f763c 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -275,8 +275,9 @@
 		return false;
 
 	/*
-	 * for LS & FS periodic endpoints which its device don't attach
-	 * to TT are also ignored, root-hub will schedule them directly
+	 * LS & FS periodic endpoints whose device is not behind a TT are
+	 * also ignored; the root hub schedules them directly, but the
+	 * @bpkts field of the endpoint context must still be set to 1.
 	 */
 	if (is_fs_or_ls(speed) && !has_tt)
 		return false;
@@ -339,8 +340,17 @@
 		GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
 		usb_endpoint_dir_in(&ep->desc), ep);
 
-	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
+	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
+		/*
+		 * set @bpkts to 1 if it is a LS or FS periodic endpoint whose
+		 * device is not connected through an external HS hub
+		 */
+		if (usb_endpoint_xfer_int(&ep->desc)
+			|| usb_endpoint_xfer_isoc(&ep->desc))
+			ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
+
 		return 0;
+	}
 
 	bw_index = get_bw_index(xhci, udev, ep);
 	sch_bw = &sch_array[bw_index];
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index c9ab6a4..9532f5a 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -696,9 +696,24 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
+/*
+ * If IP sleep fails and all clocks are disabled, any register access will
+ * hang the AHB bus, so stop polling the roothubs to avoid register access
+ * during bus suspend. There is no need to check whether IP sleep failed:
+ * if it did, SPM will wake the system up immediately after system suspend
+ * completes, which is exactly what we want.
+ */
 static int xhci_mtk_suspend(struct device *dev)
 {
 	struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+	struct usb_hcd *hcd = mtk->hcd;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	xhci_dbg(xhci, "%s: stop port polling\n", __func__);
+	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	del_timer_sync(&hcd->rh_timer);
+	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+	del_timer_sync(&xhci->shared_hcd->rh_timer);
 
 	xhci_mtk_host_disable(mtk);
 	xhci_mtk_phy_power_off(mtk);
@@ -710,11 +725,19 @@
 static int xhci_mtk_resume(struct device *dev)
 {
 	struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+	struct usb_hcd *hcd = mtk->hcd;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
 	usb_wakeup_disable(mtk);
 	xhci_mtk_clks_enable(mtk);
 	xhci_mtk_phy_power_on(mtk);
 	xhci_mtk_host_enable(mtk);
+
+	xhci_dbg(xhci, "%s: restart port polling\n", __func__);
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	usb_hcd_poll_rh_status(hcd);
+	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+	usb_hcd_poll_rh_status(xhci->shared_hcd);
 	return 0;
 }
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 58c43ed..f0640b7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -28,7 +28,9 @@
 #include "xhci.h"
 #include "xhci-trace.h"
 
-#define PORT2_SSIC_CONFIG_REG2	0x883c
+#define SSIC_PORT_NUM		2
+#define SSIC_PORT_CFG2		0x880c
+#define SSIC_PORT_CFG2_OFFSET	0x30
 #define PROG_DONE		(1 << 30)
 #define SSIC_PORT_UNUSED	(1 << 31)
 
@@ -45,6 +47,7 @@
 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -151,9 +154,14 @@
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+	}
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
 			pdev->device == PCI_DEVICE_ID_EJ168) {
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -312,22 +320,20 @@
  * SSIC PORT need to be marked as "unused" before putting xHCI
  * into D3. After D3 exit, the SSIC port need to be marked as "used".
  * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
  */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
-	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 	u32 val;
 	void __iomem *reg;
+	int i;
 
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+	for (i = 0; i < SSIC_PORT_NUM; i++) {
+		reg = (void __iomem *) xhci->cap_regs +
+				SSIC_PORT_CFG2 +
+				i * SSIC_PORT_CFG2_OFFSET;
 
-		reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
-		/* Notify SSIC that SSIC profile programming is not done */
+		/* Notify SSIC that SSIC profile programming is not done. */
 		val = readl(reg) & ~PROG_DONE;
 		writel(val, reg);
 
@@ -344,6 +350,17 @@
 		writel(val, reg);
 		readl(reg);
 	}
+}
+
+/*
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	void __iomem *reg;
+	u32 val;
 
 	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
 	val = readl(reg);
@@ -355,6 +372,7 @@
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	int			ret;
 
 	/*
 	 * Systems with the TI redriver that loses port status change events
@@ -364,9 +382,16 @@
 		pdev->no_d3cold = true;
 
 	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-		xhci_pme_quirk(hcd, true);
+		xhci_pme_quirk(hcd);
 
-	return xhci_suspend(xhci, do_wakeup);
+	if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+		xhci_ssic_port_unused_quirk(hcd, true);
+
+	ret = xhci_suspend(xhci, do_wakeup);
+	if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
+		xhci_ssic_port_unused_quirk(hcd, false);
+
+	return ret;
 }
 
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
@@ -396,8 +421,11 @@
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 		usb_enable_intel_xhci_ports(pdev);
 
+	if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+		xhci_ssic_port_unused_quirk(hcd, false);
+
 	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-		xhci_pme_quirk(hcd, false);
+		xhci_pme_quirk(hcd);
 
 	retval = xhci_resume(xhci, hibernated);
 	return retval;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 770b6b0..d39d6bf 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -184,7 +184,8 @@
 		struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
 
 		/* Just copy data for now */
-		*priv = *priv_match;
+		if (priv_match)
+			*priv = *priv_match;
 	}
 
 	if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f1c21c4..3915657 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2193,10 +2193,6 @@
 		}
 	/* Fast path - was this the last TRB in the TD for this URB? */
 	} else if (event_trb == td->last_trb) {
-		if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
-			return finish_td(xhci, td, event_trb, event, ep,
-					 status, false);
-
 		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length -
@@ -2248,12 +2244,6 @@
 			td->urb->actual_length +=
 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
-		if (trb_comp_code == COMP_SHORT_TX) {
-			xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
-			td->urb_length_set = true;
-			return 0;
-		}
 	}
 
 	return finish_td(xhci, td, event_trb, event, ep, status, false);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 26a44c0..0c8087d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1554,7 +1554,9 @@
 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 				"HW died, freeing TD.");
 		urb_priv = urb->hcpriv;
-		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+		for (i = urb_priv->td_cnt;
+		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+		     i++) {
 			td = urb_priv->td[i];
 			if (!list_empty(&td->td_list))
 				list_del_init(&td->td_list);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 9be7348..cc65138 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1631,6 +1631,7 @@
 #define XHCI_BROKEN_STREAMS	(1 << 19)
 #define XHCI_PME_STUCK_QUIRK	(1 << 20)
 #define XHCI_MTK_HOST		(1 << 21)
+#define XHCI_SSIC_PORT_UNUSED	(1 << 22)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index b2685e7..3eaa4ba 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -348,7 +348,9 @@
 	struct ux500_glue	*glue = dev_get_drvdata(dev);
 	struct musb		*musb = glue_to_musb(glue);
 
-	usb_phy_set_suspend(musb->xceiv, 1);
+	if (musb)
+		usb_phy_set_suspend(musb->xceiv, 1);
+
 	clk_disable_unprepare(glue->clk);
 
 	return 0;
@@ -366,7 +368,8 @@
 		return ret;
 	}
 
-	usb_phy_set_suspend(musb->xceiv, 0);
+	if (musb)
+		usb_phy_set_suspend(musb->xceiv, 0);
 
 	return 0;
 }
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 0d19a6d..970a30e 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1599,6 +1599,8 @@
 						&motg->id.nb);
 		if (ret < 0) {
 			dev_err(&pdev->dev, "register ID notifier failed\n");
+			extcon_unregister_notifier(motg->vbus.extcon,
+						   EXTCON_USB, &motg->vbus.nb);
 			return ret;
 		}
 
@@ -1660,15 +1662,6 @@
 	if (!motg)
 		return -ENOMEM;
 
-	pdata = dev_get_platdata(&pdev->dev);
-	if (!pdata) {
-		if (!np)
-			return -ENXIO;
-		ret = msm_otg_read_dt(pdev, motg);
-		if (ret)
-			return ret;
-	}
-
 	motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
 				     GFP_KERNEL);
 	if (!motg->phy.otg)
@@ -1710,6 +1703,15 @@
 	if (!motg->regs)
 		return -ENOMEM;
 
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		if (!np)
+			return -ENXIO;
+		ret = msm_otg_read_dt(pdev, motg);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * NOTE: The PHYs can be multiplexed between the chipidea controller
 	 * and the dwc3 controller, using a single bit. It is important that
@@ -1717,8 +1719,10 @@
 	 */
 	if (motg->phy_number) {
 		phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
-		if (!phy_select)
-			return -ENOMEM;
+		if (!phy_select) {
+			ret = -ENOMEM;
+			goto unregister_extcon;
+		}
 		/* Enable second PHY with the OTG port */
 		writel(0x1, phy_select);
 	}
@@ -1728,7 +1732,8 @@
 	motg->irq = platform_get_irq(pdev, 0);
 	if (motg->irq < 0) {
 		dev_err(&pdev->dev, "platform_get_irq failed\n");
-		return motg->irq;
+		ret = motg->irq;
+		goto unregister_extcon;
 	}
 
 	regs[0].supply = "vddcx";
@@ -1737,7 +1742,7 @@
 
 	ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
 	if (ret)
-		return ret;
+		goto unregister_extcon;
 
 	motg->vddcx = regs[0].consumer;
 	motg->v3p3  = regs[1].consumer;
@@ -1834,6 +1839,12 @@
 	clk_disable_unprepare(motg->clk);
 	if (!IS_ERR(motg->core_clk))
 		clk_disable_unprepare(motg->core_clk);
+unregister_extcon:
+	extcon_unregister_notifier(motg->id.extcon,
+				   EXTCON_USB_HOST, &motg->id.nb);
+	extcon_unregister_notifier(motg->vbus.extcon,
+				   EXTCON_USB, &motg->vbus.nb);
+
 	return ret;
 }
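
The phy-msm-usb hunks above convert early returns after the extcon notifiers are registered into jumps to an unwind label, so every later failure path releases what was already acquired. A generic sketch of that error-unwinding idiom; the acquire_*/release_* helpers are hypothetical stand-ins for the extcon and regulator calls.

#include <linux/platform_device.h>

/* Hypothetical stubs standing in for extcon/regulator registration. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = acquire_b();
	if (ret)
		goto err_release_a;	/* unwind in reverse order */

	return 0;

err_release_a:
	release_a();
	return ret;
}
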
 
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index c2936dc..00bfea0 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -220,7 +220,7 @@
 /* Return true if the vbus is there */
 static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
 {
-	unsigned int vbus_value;
+	unsigned int vbus_value = 0;
 
 	if (!mxs_phy->regmap_anatop)
 		return false;
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 0081725..6b2a06d 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -152,7 +152,7 @@
 
 struct da8xx_fb_par {
 	struct device		*dev;
-	resource_size_t p_palette_base;
+	dma_addr_t		p_palette_base;
 	unsigned char *v_palette_base;
 	dma_addr_t		vram_phys;
 	unsigned long		vram_size;
@@ -1428,7 +1428,7 @@
 
 	par->vram_virt = dma_alloc_coherent(NULL,
 					    par->vram_size,
-					    (resource_size_t *) &par->vram_phys,
+					    &par->vram_phys,
 					    GFP_KERNEL | GFP_DMA);
 	if (!par->vram_virt) {
 		dev_err(&device->dev,
@@ -1448,7 +1448,7 @@
 
 	/* allocate palette buffer */
 	par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
-						  (resource_size_t *)&par->p_palette_base,
+						  &par->p_palette_base,
 						  GFP_KERNEL | GFP_DMA);
 	if (!par->v_palette_base) {
 		dev_err(&device->dev,
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 95873f2..de2f3e7 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -829,8 +829,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
 {
 	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
 
@@ -843,7 +842,7 @@
 	return 0;
 }
 
-static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
 {
 	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
 
@@ -855,10 +854,6 @@
 
 	return 0;
 }
-#else
-#define s6e8ax0_suspend		NULL
-#define s6e8ax0_resume		NULL
-#endif
 
 static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
 	.name = "s6e8ax0",
@@ -867,8 +862,8 @@
 	.power_on = s6e8ax0_power_on,
 	.set_sequence = s6e8ax0_set_sequence,
 	.probe = s6e8ax0_probe,
-	.suspend = s6e8ax0_suspend,
-	.resume = s6e8ax0_resume,
+	.suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
+	.resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
 };
 
 static int s6e8ax0_init(void)
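
The s6e8ax0 change takes the same __maybe_unused approach but keeps the ops-table entries, resolving them at compile time with IS_ENABLED() instead of #define'ing them to NULL. A small sketch of that pattern with an illustrative ops structure:

#include <linux/compiler.h>
#include <linux/kconfig.h>

struct foo_lcd_ops {
	int (*suspend)(void *ctx);
};

static int __maybe_unused foo_lcd_suspend(void *ctx)
{
	return 0;
}

/* IS_ENABLED() folds to 0 or 1 at compile time, so no #ifdef/#else block. */
static const struct foo_lcd_ops foo_lcd_driver_ops = {
	.suspend = IS_ENABLED(CONFIG_PM) ? foo_lcd_suspend : NULL,
};
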
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index cee8860..bb2f1e8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -902,6 +902,21 @@
 		goto failed_getclock;
 	}
 
+	/*
+	 * The LCDC controller does not have an enable bit. The
+	 * controller starts directly when the clocks are enabled.
+	 * If the clocks are enabled while the controller is not yet
+	 * programmed with proper register values (enabled by the
+	 * bootloader, for example), it just goes into an undefined
+	 * state.
+	 * To avoid this, enable and then disable the LCDC IPG clock
+	 * so that we force a kind of 'reset' of the LCDC block.
+	 */
+	ret = clk_prepare_enable(fbi->clk_ipg);
+	if (ret)
+		goto failed_getclock;
+	clk_disable_unprepare(fbi->clk_ipg);
+
 	fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
 	if (IS_ERR(fbi->clk_ahb)) {
 		ret = PTR_ERR(fbi->clk_ahb);
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index de54a47..b6f83d5 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -503,8 +503,7 @@
 	ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
 			res->start, resource_size(res));
 	if (ctrl->reg_base == NULL) {
-		dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
-			res->start, res->end);
+		dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
 		ret = -ENOMEM;
 		goto failed;
 	}
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index c9293ae..a970edc2 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -123,11 +123,11 @@
 
 	/* Horizontal timings */
 	ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
-		      (var->right_margin - 1) << 16 | (var->xres - 1));
+		      (var->left_margin - 1) << 16 | (var->xres - 1));
 
 	/* Vertical timings */
 	ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
-		      (var->lower_margin - 1) << 16 | (var->yres - 1));
+		      (var->upper_margin - 1) << 16 | (var->yres - 1));
 
 	/* Total length of frame */
 	hlen = var->left_margin + var->right_margin + var->hsync_len +
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4f0e7be..0f6d851 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -145,7 +145,8 @@
 config TANGOX_WATCHDOG
 	tristate "Sigma Designs SMP86xx/SMP87xx watchdog"
 	select WATCHDOG_CORE
-	depends on ARCH_TANGOX || COMPILE_TEST
+	depends on ARCH_TANGO || COMPILE_TEST
+	depends on HAS_IOMEM
 	help
 	  Support for the watchdog in Sigma Designs SMP86xx (tango3)
 	  and SMP87xx (tango4) family chips.
@@ -618,6 +619,7 @@
 config LPC18XX_WATCHDOG
 	tristate "LPC18xx/43xx Watchdog"
 	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on HAS_IOMEM
 	select WATCHDOG_CORE
 	help
 	  Say Y here if to include support for the watchdog timer
@@ -1374,6 +1376,7 @@
 config BCM7038_WDT
 	tristate "BCM7038 Watchdog"
 	select WATCHDOG_CORE
+	depends on HAS_IOMEM
 	help
 	 Watchdog driver for the built-in hardware in Broadcom 7038 SoCs.
 
@@ -1383,6 +1386,7 @@
 	tristate "Imagination Technologies PDC Watchdog Timer"
 	depends on HAS_IOMEM
 	depends on METAG || MIPS || COMPILE_TEST
+	select WATCHDOG_CORE
 	help
 	  Driver for Imagination Technologies PowerDown Controller
 	  Watchdog Timer.
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index f36ca4b..ac5840d 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -292,4 +292,4 @@
 		 "Force selection of a timeout setting without initial delay "
 		 "(max6373/74 only, default=0)");
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 1a11aed..68952d9 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -608,7 +608,7 @@
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor *endpoint;
 	struct usb_pcwd_private *usb_pcwd = NULL;
-	int pipe, maxp;
+	int pipe;
 	int retval = -ENOMEM;
 	int got_fw_rev;
 	unsigned char fw_rev_major, fw_rev_minor;
@@ -641,7 +641,6 @@
 
 	/* get a handle to the interrupt data pipe */
 	pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
-	maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
 
 	/* allocate memory for our device and initialize it */
 	usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 01d8162..e7a715e 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -139,12 +139,11 @@
 
 	writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
 	writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+	writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
 
-	if (!ping) {
-		writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+	if (!ping)
 		writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
 				WDTCONTROL);
-	}
 
 	writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index afb4374..39b3a17 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1730,6 +1730,12 @@
 	return __dax_fault(vma, vmf, blkdev_get_block, NULL);
 }
 
+static int blkdev_dax_pfn_mkwrite(struct vm_area_struct *vma,
+		struct vm_fault *vmf)
+{
+	return dax_pfn_mkwrite(vma, vmf);
+}
+
 static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
 		pmd_t *pmd, unsigned int flags)
 {
@@ -1739,7 +1745,7 @@
 static const struct vm_operations_struct blkdev_dax_vm_ops = {
 	.fault		= blkdev_dax_fault,
 	.pmd_fault	= blkdev_dax_pmd_fault,
-	.pfn_mkwrite	= blkdev_dax_fault,
+	.pfn_mkwrite	= blkdev_dax_pfn_mkwrite,
 };
 
 static const struct vm_operations_struct blkdev_default_vm_ops = {
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b90cd37..f6dac40 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1406,7 +1406,8 @@
 			read_extent_buffer(eb, dest + bytes_left,
 					   name_off, name_len);
 		if (eb != eb_in) {
-			btrfs_tree_read_unlock_blocking(eb);
+			if (!path->skip_locking)
+				btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
 		}
 		ret = btrfs_find_item(fs_root, path, parent, 0,
@@ -1426,9 +1427,10 @@
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
 		if (eb != eb_in) {
-			atomic_inc(&eb->refs);
-			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			if (!path->skip_locking)
+				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			path->nodes[0] = NULL;
+			path->locks[0] = 0;
 		}
 		btrfs_release_path(path);
 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c473c42..3346cd8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -637,11 +637,7 @@
 	faili = nr_pages - 1;
 	cb->nr_pages = nr_pages;
 
-	/* In the parent-locked case, we only locked the range we are
-	 * interested in.  In all other cases, we can opportunistically
-	 * cache decompressed data that goes beyond the requested range. */
-	if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
-		add_ra_bio_pages(inode, em_start + em_len, cb);
+	add_ra_bio_pages(inode, em_start + em_len, cb);
 
 	/* include any pages we added in add_ra-bio_pages */
 	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 0be47e4..b57daa8 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1689,7 +1689,7 @@
  *
  */
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
-				    struct list_head *ins_list)
+				    struct list_head *ins_list, bool *emitted)
 {
 	struct btrfs_dir_item *di;
 	struct btrfs_delayed_item *curr, *next;
@@ -1733,6 +1733,7 @@
 
 		if (over)
 			return 1;
+		*emitted = true;
 	}
 	return 0;
 }
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index f70119f..0167853 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -144,7 +144,7 @@
 int btrfs_should_delete_dir_index(struct list_head *del_list,
 				  u64 index);
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
-				    struct list_head *ins_list);
+				    struct list_head *ins_list, bool *emitted);
 
 /* for init */
 int __init btrfs_delayed_inode_init(void);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2e7c97a..392592d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2897,12 +2897,11 @@
 	struct block_device *bdev;
 	int ret;
 	int nr = 0;
-	int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
 	size_t pg_offset = 0;
 	size_t iosize;
 	size_t disk_io_size;
 	size_t blocksize = inode->i_sb->s_blocksize;
-	unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
+	unsigned long this_bio_flag = 0;
 
 	set_page_extent_mapped(page);
 
@@ -2942,18 +2941,16 @@
 			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
-			if (!parent_locked)
-				unlock_extent_cached(tree, cur,
-						     cur + iosize - 1,
-						     &cached, GFP_NOFS);
+			unlock_extent_cached(tree, cur,
+					     cur + iosize - 1,
+					     &cached, GFP_NOFS);
 			break;
 		}
 		em = __get_extent_map(inode, page, pg_offset, cur,
 				      end - cur + 1, get_extent, em_cached);
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
-			if (!parent_locked)
-				unlock_extent(tree, cur, end);
+			unlock_extent(tree, cur, end);
 			break;
 		}
 		extent_offset = cur - em->start;
@@ -3038,12 +3035,9 @@
 
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
-			if (parent_locked)
-				free_extent_state(cached);
-			else
-				unlock_extent_cached(tree, cur,
-						     cur + iosize - 1,
-						     &cached, GFP_NOFS);
+			unlock_extent_cached(tree, cur,
+					     cur + iosize - 1,
+					     &cached, GFP_NOFS);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -3052,8 +3046,7 @@
 		if (test_range_bit(tree, cur, cur_end,
 				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
-			if (!parent_locked)
-				unlock_extent(tree, cur, cur + iosize - 1);
+			unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -3063,8 +3056,7 @@
 		 */
 		if (block_start == EXTENT_MAP_INLINE) {
 			SetPageError(page);
-			if (!parent_locked)
-				unlock_extent(tree, cur, cur + iosize - 1);
+			unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -3083,8 +3075,7 @@
 			*bio_flags = this_bio_flag;
 		} else {
 			SetPageError(page);
-			if (!parent_locked)
-				unlock_extent(tree, cur, cur + iosize - 1);
+			unlock_extent(tree, cur, cur + iosize - 1);
 		}
 		cur = cur + iosize;
 		pg_offset += iosize;
@@ -3213,20 +3204,6 @@
 	return ret;
 }
 
-int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
-				 get_extent_t *get_extent, int mirror_num)
-{
-	struct bio *bio = NULL;
-	unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
-	int ret;
-
-	ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
-			    &bio_flags, READ, NULL);
-	if (bio)
-		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
-	return ret;
-}
-
 static noinline void update_nr_written(struct page *page,
 				      struct writeback_control *wbc,
 				      unsigned long nr_written)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 0377413..880d529 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -29,7 +29,6 @@
  */
 #define EXTENT_BIO_COMPRESSED 1
 #define EXTENT_BIO_TREE_LOG 2
-#define EXTENT_BIO_PARENT_LOCKED 4
 #define EXTENT_BIO_FLAG_SHIFT 16
 
 /* these are bit numbers for test/set bit */
@@ -210,8 +209,6 @@
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent, int mirror_num);
-int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
-				 get_extent_t *get_extent, int mirror_num);
 int __init extent_io_init(void);
 void extent_io_exit(void);
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5f06eb1..d96f5cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5717,6 +5717,7 @@
 	char *name_ptr;
 	int name_len;
 	int is_curr = 0;	/* ctx->pos points to the current index? */
+	bool emitted;
 
 	/* FIXME, use a real flag for deciding about the key type */
 	if (root->fs_info->tree_root == root)
@@ -5745,6 +5746,7 @@
 	if (ret < 0)
 		goto err;
 
+	emitted = false;
 	while (1) {
 		leaf = path->nodes[0];
 		slot = path->slots[0];
@@ -5824,6 +5826,7 @@
 
 			if (over)
 				goto nopos;
+			emitted = true;
 			di_len = btrfs_dir_name_len(leaf, di) +
 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
 			di_cur += di_len;
@@ -5836,11 +5839,20 @@
 	if (key_type == BTRFS_DIR_INDEX_KEY) {
 		if (is_curr)
 			ctx->pos++;
-		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
+		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
 		if (ret)
 			goto nopos;
 	}
 
+	/*
+	 * If we haven't emitted any dir entry, we must not touch ctx->pos as
+	 * it was set to the termination value in a previous call. We assume
+	 * that "." and ".." were emitted if we reach this point, and set the
+	 * termination value for an empty directory as well.
+	 */
+	if (ctx->pos > 2 && !emitted)
+		goto nopos;
+
 	/* Reached end of directory/root. Bump pos past the last item. */
 	ctx->pos++;
 
@@ -7974,6 +7986,7 @@
 
 	kfree(dip);
 
+	dio_bio->bi_error = bio->bi_error;
 	dio_end_io(dio_bio, bio->bi_error);
 
 	if (io_bio->end_io)
@@ -8028,6 +8041,7 @@
 
 	kfree(dip);
 
+	dio_bio->bi_error = bio->bi_error;
 	dio_end_io(dio_bio, bio->bi_error);
 	bio_put(bio);
 }
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 952172c..48aee98 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2794,24 +2794,29 @@
 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
 {
 	struct page *page;
-	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 
 	page = grab_cache_page(inode->i_mapping, index);
 	if (!page)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	if (!PageUptodate(page)) {
-		if (extent_read_full_page_nolock(tree, page, btrfs_get_extent,
-						 0))
-			return NULL;
+		int ret;
+
+		ret = btrfs_readpage(NULL, page);
+		if (ret)
+			return ERR_PTR(ret);
 		lock_page(page);
 		if (!PageUptodate(page)) {
 			unlock_page(page);
 			page_cache_release(page);
-			return NULL;
+			return ERR_PTR(-EIO);
+		}
+		if (page->mapping != inode->i_mapping) {
+			unlock_page(page);
+			page_cache_release(page);
+			return ERR_PTR(-EAGAIN);
 		}
 	}
-	unlock_page(page);
 
 	return page;
 }
@@ -2823,17 +2828,31 @@
 	pgoff_t index = off >> PAGE_CACHE_SHIFT;
 
 	for (i = 0; i < num_pages; i++) {
+again:
 		pages[i] = extent_same_get_page(inode, index + i);
-		if (!pages[i])
-			return -ENOMEM;
+		if (IS_ERR(pages[i])) {
+			int err = PTR_ERR(pages[i]);
+
+			if (err == -EAGAIN)
+				goto again;
+			pages[i] = NULL;
+			return err;
+		}
 	}
 	return 0;
 }
 
-static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
+static int lock_extent_range(struct inode *inode, u64 off, u64 len,
+			     bool retry_range_locking)
 {
-	/* do any pending delalloc/csum calc on src, one way or
-	   another, and lock file content */
+	/*
+	 * Do any pending delalloc/csum calculations on the inode, one way or
+	 * another, and lock the file content.
+	 * The locking order is:
+	 *
+	 *   1) pages
+	 *   2) range in the inode's io tree
+	 */
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
 		lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
@@ -2851,8 +2870,11 @@
 		unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
+		if (!retry_range_locking)
+			return -EAGAIN;
 		btrfs_wait_ordered_range(inode, off, len);
 	}
+	return 0;
 }
 
 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
@@ -2877,15 +2899,24 @@
 	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
 }
 
-static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
-				     struct inode *inode2, u64 loff2, u64 len)
+static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+				    struct inode *inode2, u64 loff2, u64 len,
+				    bool retry_range_locking)
 {
+	int ret;
+
 	if (inode1 < inode2) {
 		swap(inode1, inode2);
 		swap(loff1, loff2);
 	}
-	lock_extent_range(inode1, loff1, len);
-	lock_extent_range(inode2, loff2, len);
+	ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
+	if (ret)
+		return ret;
+	ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
+	if (ret)
+		unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
+			      loff1 + len - 1);
+	return ret;
 }
 
 struct cmp_pages {
@@ -2901,11 +2932,15 @@
 
 	for (i = 0; i < cmp->num_pages; i++) {
 		pg = cmp->src_pages[i];
-		if (pg)
+		if (pg) {
+			unlock_page(pg);
 			page_cache_release(pg);
+		}
 		pg = cmp->dst_pages[i];
-		if (pg)
+		if (pg) {
+			unlock_page(pg);
 			page_cache_release(pg);
+		}
 	}
 	kfree(cmp->src_pages);
 	kfree(cmp->dst_pages);
@@ -2966,6 +3001,8 @@
 
 		src_page = cmp->src_pages[i];
 		dst_page = cmp->dst_pages[i];
+		ASSERT(PageLocked(src_page));
+		ASSERT(PageLocked(dst_page));
 
 		addr = kmap_atomic(src_page);
 		dst_addr = kmap_atomic(dst_page);
@@ -3078,14 +3115,46 @@
 		goto out_unlock;
 	}
 
+again:
 	ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
 	if (ret)
 		goto out_unlock;
 
 	if (same_inode)
-		lock_extent_range(src, same_lock_start, same_lock_len);
+		ret = lock_extent_range(src, same_lock_start, same_lock_len,
+					false);
 	else
-		btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+		ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
+					       false);
+	/*
+	 * If one of the inodes has dirty pages in the respective range or
+	 * ordered extents, we need to flush delalloc and wait for all ordered
+	 * extents in the range. We must unlock the pages and the ranges in the
+	 * io trees to avoid deadlocks when flushing delalloc (requires locking
+	 * pages) and when waiting for ordered extents to complete (they require
+	 * range locking).
+	 */
+	if (ret == -EAGAIN) {
+		/*
+		 * Ranges in the io trees already unlocked. Now unlock all
+		 * pages before waiting for all IO to complete.
+		 */
+		btrfs_cmp_data_free(&cmp);
+		if (same_inode) {
+			btrfs_wait_ordered_range(src, same_lock_start,
+						 same_lock_len);
+		} else {
+			btrfs_wait_ordered_range(src, loff, len);
+			btrfs_wait_ordered_range(dst, dst_loff, len);
+		}
+		goto again;
+	}
+	ASSERT(ret == 0);
+	if (WARN_ON(ret)) {
+		/* ranges in the io trees already unlocked */
+		btrfs_cmp_data_free(&cmp);
+		return ret;
+	}
 
 	/* pass original length for comparison so we stay within i_size */
 	ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
@@ -3795,9 +3864,15 @@
 		u64 lock_start = min_t(u64, off, destoff);
 		u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
 
-		lock_extent_range(src, lock_start, lock_len);
+		ret = lock_extent_range(src, lock_start, lock_len, true);
 	} else {
-		btrfs_double_extent_lock(src, off, inode, destoff, len);
+		ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
+					       true);
+	}
+	ASSERT(ret == 0);
+	if (WARN_ON(ret)) {
+		/* ranges in the io trees already unlocked */
+		goto out_unlock;
 	}
 
 	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
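
The extent-same and clone changes above establish a strict locking order (pages first, then the io-tree ranges) and turn a range-lock conflict into -EAGAIN so everything can be dropped, ordered I/O waited for, and the whole sequence retried. A schematic of that retry loop, using hypothetical helpers in place of the btrfs internals:

#include <linux/errno.h>

/* Hypothetical stubs for the page/range locking steps shown above. */
static int lock_all_pages(void)       { return 0; }
static int try_lock_io_ranges(void)   { return 0; } /* -EAGAIN when it must back off */
static void unlock_all_pages(void)    { }
static void wait_for_ordered_io(void) { }

static int lock_pages_then_ranges(void)
{
	int ret;

again:
	ret = lock_all_pages();			/* level 1: page locks */
	if (ret)
		return ret;

	ret = try_lock_io_ranges();		/* level 2: io-tree ranges */
	if (ret == -EAGAIN) {
		unlock_all_pages();		/* flushing needs the page locks */
		wait_for_ordered_io();
		goto again;
	}
	return ret;
}
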
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 86a9c38..eb9028e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -698,8 +698,8 @@
 
 	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
 			false, GFP_NOFS);
-	if (IS_ERR(req)) {
-		ret = PTR_ERR(req);
+	if (!req) {
+		ret = -ENOMEM;
 		req = orig_req;
 		goto out;
 	}
@@ -716,7 +716,6 @@
 	ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
 				snapc, CEPH_NOSNAP, &aio_req->mtime);
 
-	ceph_put_snap_context(snapc);
 	ceph_osdc_put_request(orig_req);
 
 	req->r_callback = ceph_aio_complete_req;
@@ -731,6 +730,7 @@
 		ceph_aio_complete_req(req, NULL);
 	}
 
+	ceph_put_snap_context(snapc);
 	kfree(aio_work);
 }
 
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 7dc886c..e956cba 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -175,7 +175,7 @@
 	 * string to the length of the original string to allow for worst case.
 	 */
 	md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
-	mountdata = kzalloc(md_len + 1, GFP_KERNEL);
+	mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
 	if (mountdata == NULL) {
 		rc = -ENOMEM;
 		goto compose_mount_options_err;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index afa09fc..e682b36 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -714,7 +714,7 @@
 
 	ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
 	if (!ses->auth_key.response) {
-		rc = ENOMEM;
+		rc = -ENOMEM;
 		ses->auth_key.len = 0;
 		goto setup_ntlmv2_rsp_ret;
 	}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4fbd92d..a763cd3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2999,8 +2999,7 @@
 	if (ses_init_buf) {
 		ses_init_buf->trailer.session_req.called_len = 32;
 
-		if (server->server_RFC1001_name &&
-		    server->server_RFC1001_name[0] != 0)
+		if (server->server_RFC1001_name[0] != 0)
 			rfc1002mangle(ses_init_buf->trailer.
 				      session_req.called_name,
 				      server->server_RFC1001_name,
diff --git a/fs/dax.c b/fs/dax.c
index e0e9358..fc2e314 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -358,7 +358,8 @@
 	void *entry;
 
 	WARN_ON_ONCE(pmd_entry && !dirty);
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+	if (dirty)
+		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	spin_lock_irq(&mapping->tree_lock);
 
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 1f107fd..655f21f 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -575,6 +575,26 @@
 	mutex_unlock(&allocated_ptys_lock);
 }
 
+/*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+
+void devpts_add_ref(struct inode *ptmx_inode)
+{
+	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+	atomic_inc(&sb->s_active);
+	ihold(ptmx_inode);
+}
+
+void devpts_del_ref(struct inode *ptmx_inode)
+{
+	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+	iput(ptmx_inode);
+	deactivate_super(sb);
+}
+
 /**
  * devpts_pty_new -- create a new inode in /dev/pts/
  * @ptmx_inode: inode of the master
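
devpts_add_ref() and devpts_del_ref() exist to pair with the pty_unix98 hunks earlier in this series: the ptmx open path pins the inode and its devpts superblock, and the final shutdown drops both only after devpts_kill_index(). A condensed sketch of that pairing; the example_* wrappers are illustrative, while the devpts_* calls are the ones added here.

#include <linux/fs.h>
#include <linux/devpts_fs.h>

/* ptmx open: keep the inode and its superblock alive past the last close */
static void example_ptmx_open(struct inode *ptmx_inode)
{
	devpts_add_ref(ptmx_inode);	/* ihold() + extra s_active reference */
}

/* last close of the pair, possibly arriving via /dev/tty */
static void example_ptmx_shutdown(struct inode *ptmx_inode, int index)
{
	devpts_kill_index(ptmx_inode, index);
	devpts_del_ref(ptmx_inode);	/* iput() + deactivate_super() */
}
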
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1b2f7ff..d6a9012 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -472,8 +472,8 @@
 		dio->io_error = -EIO;
 
 	if (dio->is_async && dio->rw == READ && dio->should_dirty) {
-		bio_check_pages_dirty(bio);	/* transfers ownership */
 		err = bio->bi_error;
+		bio_check_pages_dirty(bio);	/* transfers ownership */
 	} else {
 		bio_for_each_segment_all(bvec, bio, i) {
 			struct page *page = bvec->bv_page;
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index c424e48..d48e0d2 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
+#include <linux/mount.h>
 
 #include "internal.h"
 
@@ -103,9 +104,78 @@
 	return size;
 }
 
+static int
+efivarfs_ioc_getxflags(struct file *file, void __user *arg)
+{
+	struct inode *inode = file->f_mapping->host;
+	unsigned int i_flags;
+	unsigned int flags = 0;
+
+	i_flags = inode->i_flags;
+	if (i_flags & S_IMMUTABLE)
+		flags |= FS_IMMUTABLE_FL;
+
+	if (copy_to_user(arg, &flags, sizeof(flags)))
+		return -EFAULT;
+	return 0;
+}
+
+static int
+efivarfs_ioc_setxflags(struct file *file, void __user *arg)
+{
+	struct inode *inode = file->f_mapping->host;
+	unsigned int flags;
+	unsigned int i_flags = 0;
+	int error;
+
+	if (!inode_owner_or_capable(inode))
+		return -EACCES;
+
+	if (copy_from_user(&flags, arg, sizeof(flags)))
+		return -EFAULT;
+
+	if (flags & ~FS_IMMUTABLE_FL)
+		return -EOPNOTSUPP;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+
+	if (flags & FS_IMMUTABLE_FL)
+		i_flags |= S_IMMUTABLE;
+
+
+	error = mnt_want_write_file(file);
+	if (error)
+		return error;
+
+	inode_lock(inode);
+	inode_set_flags(inode, i_flags, S_IMMUTABLE);
+	inode_unlock(inode);
+
+	mnt_drop_write_file(file);
+
+	return 0;
+}
+
+long
+efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
+{
+	void __user *arg = (void __user *)p;
+
+	switch (cmd) {
+	case FS_IOC_GETFLAGS:
+		return efivarfs_ioc_getxflags(file, arg);
+	case FS_IOC_SETFLAGS:
+		return efivarfs_ioc_setxflags(file, arg);
+	}
+
+	return -ENOTTY;
+}
+
 const struct file_operations efivarfs_file_operations = {
 	.open	= simple_open,
 	.read	= efivarfs_file_read,
 	.write	= efivarfs_file_write,
 	.llseek	= no_llseek,
+	.unlocked_ioctl = efivarfs_file_ioctl,
 };
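
With this handler wired into efivarfs_file_operations, the immutable bit on an efivarfs file can be read and toggled from userspace through the standard FS_IOC_GETFLAGS/FS_IOC_SETFLAGS interface (the same path chattr +i/-i uses). A minimal userspace sketch; the variable path is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	/* Example path; substitute any file under /sys/firmware/efi/efivars. */
	const char *var = "/sys/firmware/efi/efivars/Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	unsigned int flags;
	int fd = open(var, O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags &= ~FS_IMMUTABLE_FL;		/* clear the immutable bit */
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
			perror("FS_IOC_SETFLAGS");	/* needs CAP_LINUX_IMMUTABLE */
	}

	close(fd);
	return 0;
}
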
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 3381b9d..e2ab6d0 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -15,7 +15,8 @@
 #include "internal.h"
 
 struct inode *efivarfs_get_inode(struct super_block *sb,
-				const struct inode *dir, int mode, dev_t dev)
+				const struct inode *dir, int mode,
+				dev_t dev, bool is_removable)
 {
 	struct inode *inode = new_inode(sb);
 
@@ -23,6 +24,7 @@
 		inode->i_ino = get_next_ino();
 		inode->i_mode = mode;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
 		switch (mode & S_IFMT) {
 		case S_IFREG:
 			inode->i_fop = &efivarfs_file_operations;
@@ -102,22 +104,17 @@
 static int efivarfs_create(struct inode *dir, struct dentry *dentry,
 			  umode_t mode, bool excl)
 {
-	struct inode *inode;
+	struct inode *inode = NULL;
 	struct efivar_entry *var;
 	int namelen, i = 0, err = 0;
+	bool is_removable = false;
 
 	if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
 		return -EINVAL;
 
-	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
-	if (!inode)
-		return -ENOMEM;
-
 	var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
-	if (!var) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!var)
+		return -ENOMEM;
 
 	/* length of the variable name itself: remove GUID and separator */
 	namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
@@ -125,6 +122,16 @@
 	efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
 			&var->var.VendorGuid);
 
+	if (efivar_variable_is_removable(var->var.VendorGuid,
+					 dentry->d_name.name, namelen))
+		is_removable = true;
+
+	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
+	if (!inode) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	for (i = 0; i < namelen; i++)
 		var->var.VariableName[i] = dentry->d_name.name[i];
 
@@ -138,7 +145,8 @@
 out:
 	if (err) {
 		kfree(var);
-		iput(inode);
+		if (inode)
+			iput(inode);
 	}
 	return err;
 }
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index b5ff16a..b450518 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -15,7 +15,8 @@
 extern const struct inode_operations efivarfs_dir_inode_operations;
 extern bool efivarfs_valid_name(const char *str, int len);
 extern struct inode *efivarfs_get_inode(struct super_block *sb,
-			const struct inode *dir, int mode, dev_t dev);
+			const struct inode *dir, int mode, dev_t dev,
+			bool is_removable);
 
 extern struct list_head efivarfs_list;
 
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index b8a564f..dd029d1 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -118,8 +118,9 @@
 	struct dentry *dentry, *root = sb->s_root;
 	unsigned long size = 0;
 	char *name;
-	int len, i;
+	int len;
 	int err = -ENOMEM;
+	bool is_removable = false;
 
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
@@ -128,15 +129,17 @@
 	memcpy(entry->var.VariableName, name16, name_size);
 	memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
 
-	len = ucs2_strlen(entry->var.VariableName);
+	len = ucs2_utf8size(entry->var.VariableName);
 
 	/* name, plus '-', plus GUID, plus NUL*/
 	name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
 	if (!name)
 		goto fail;
 
-	for (i = 0; i < len; i++)
-		name[i] = entry->var.VariableName[i] & 0xFF;
+	ucs2_as_utf8(name, entry->var.VariableName, len);
+
+	if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+		is_removable = true;
 
 	name[len] = '-';
 
@@ -144,7 +147,8 @@
 
 	name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
 
-	inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
+	inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
+				   is_removable);
 	if (!inode)
 		goto fail_name;
 
@@ -200,7 +204,7 @@
 	sb->s_d_op		= &efivarfs_d_ops;
 	sb->s_time_gran         = 1;
 
-	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
 	if (!inode)
 		return -ENOMEM;
 	inode->i_op = &efivarfs_dir_inode_operations;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ae1dbcf..cde6074 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -94,6 +94,11 @@
 /* Epoll private bits inside the event mask */
 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 
+#define EPOLLINOUT_BITS (POLLIN | POLLOUT)
+
+#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
+				EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
+
 /* Maximum number of nesting allowed inside epoll sets */
 #define EP_MAX_NESTS 4
 
@@ -1068,7 +1073,22 @@
 	 * wait list.
 	 */
 	if (waitqueue_active(&ep->wq)) {
-		ewake = 1;
+		if ((epi->event.events & EPOLLEXCLUSIVE) &&
+					!((unsigned long)key & POLLFREE)) {
+			switch ((unsigned long)key & EPOLLINOUT_BITS) {
+			case POLLIN:
+				if (epi->event.events & POLLIN)
+					ewake = 1;
+				break;
+			case POLLOUT:
+				if (epi->event.events & POLLOUT)
+					ewake = 1;
+				break;
+			case 0:
+				ewake = 1;
+				break;
+			}
+		}
 		wake_up_locked(&ep->wq);
 	}
 	if (waitqueue_active(&ep->poll_wait))
@@ -1875,9 +1895,13 @@
 	 * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
 	 * Also, we do not currently support nested exclusive wakeups.
 	 */
-	if ((epds.events & EPOLLEXCLUSIVE) && (op == EPOLL_CTL_MOD ||
-		(op == EPOLL_CTL_ADD && is_file_epoll(tf.file))))
-		goto error_tgt_fput;
+	if (epds.events & EPOLLEXCLUSIVE) {
+		if (op == EPOLL_CTL_MOD)
+			goto error_tgt_fput;
+		if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
+				(epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
+			goto error_tgt_fput;
+	}
 
 	/*
 	 * At this point it is safe to assume that the "private_data" contains
@@ -1950,8 +1974,10 @@
 		break;
 	case EPOLL_CTL_MOD:
 		if (epi) {
-			epds.events |= POLLERR | POLLHUP;
-			error = ep_modify(ep, epi, &epds);
+			if (!(epi->event.events & EPOLLEXCLUSIVE)) {
+				epds.events |= POLLERR | POLLHUP;
+				error = ep_modify(ep, epi, &epds);
+			}
 		} else
 			error = -ENOENT;
 		break;
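
For reference, the tightened checks above still permit the intended EPOLLEXCLUSIVE usage: OR it into the mask at EPOLL_CTL_ADD time together with events from EPOLLEXCLUSIVE_OK_BITS, and never EPOLL_CTL_MOD that entry afterwards. A minimal userspace sketch (assumes a glibc that already defines EPOLLEXCLUSIVE and an existing listening socket):

#include <stdio.h>
#include <sys/epoll.h>

/* listen_fd is assumed to be an already-created listening socket. */
int add_exclusive(int epfd, int listen_fd)
{
	struct epoll_event ev = {
		/* Only bits from EPOLLEXCLUSIVE_OK_BITS may accompany it. */
		.events = EPOLLIN | EPOLLEXCLUSIVE,
		.data.fd = listen_fd,
	};

	/* A later EPOLL_CTL_MOD on this entry is rejected by the kernel. */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev) < 0) {
		perror("epoll_ctl(EPOLL_CTL_ADD)");
		return -1;
	}
	return 0;
}
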
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ec0668a..fe1f50f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -191,7 +191,6 @@
 	/* If checksum is bad mark all blocks used to prevent allocation
 	 * essentially implementing a per-group read-only flag. */
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
-		ext4_error(sb, "Checksum bad for group %u", block_group);
 		grp = ext4_get_group_info(sb, block_group);
 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
 			percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -442,14 +441,16 @@
 	}
 	ext4_lock_group(sb, block_group);
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-
 		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
 		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
-		if (err)
+		if (err) {
+			ext4_error(sb, "Failed to init block bitmap for group "
+				   "%u: %d", block_group, err);
 			goto out;
+		}
 		goto verify;
 	}
 	ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index c802120..38f7562 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -467,3 +467,59 @@
 		return size;
 	return 0;
 }
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	struct inode *dir = d_inode(dentry->d_parent);
+	struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
+	int dir_has_key, cached_with_key;
+
+	if (!ext4_encrypted_inode(dir))
+		return 0;
+
+	if (ci && ci->ci_keyring_key &&
+	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+					  (1 << KEY_FLAG_REVOKED) |
+					  (1 << KEY_FLAG_DEAD))))
+		ci = NULL;
+
+	/* this should eventually be a flag in d_flags */
+	cached_with_key = dentry->d_fsdata != NULL;
+	dir_has_key = (ci != NULL);
+
+	/*
+	 * If the dentry was cached without the key, and it is a
+	 * negative dentry, it might be a valid name.  For locking
+	 * reasons we can't check whether the key has since been made
+	 * available, so we fail the validation and let ext4_lookup()
+	 * do this check instead.
+	 *
+	 * We also fail the validation if the dentry was created with
+	 * the key present, but we no longer have the key, or vice versa.
+	 */
+	if ((!cached_with_key && d_is_negative(dentry)) ||
+	    (!cached_with_key && dir_has_key) ||
+	    (cached_with_key && !dir_has_key)) {
+#if 0				/* Revalidation debug */
+		char buf[80];
+		char *cp = simple_dname(dentry, buf, sizeof(buf));
+
+		if (IS_ERR(cp))
+			cp = (char *) "???";
+		pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
+		       cached_with_key, d_is_negative(dentry),
+		       dir_has_key);
+#endif
+		return 0;
+	}
+	return 1;
+}
+
+const struct dentry_operations ext4_encrypted_d_ops = {
+	.d_revalidate = ext4_d_revalidate,
+};
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 1d1bca7..33f5e2a 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -111,6 +111,12 @@
 	int dir_has_error = 0;
 	struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
 
+	if (ext4_encrypted_inode(inode)) {
+		err = ext4_get_encryption_info(inode);
+		if (err && err != -ENOKEY)
+			return err;
+	}
+
 	if (is_dx_dir(inode)) {
 		err = ext4_dx_readdir(file, ctx);
 		if (err != ERR_BAD_DX_DIR) {
@@ -157,8 +163,11 @@
 					index, 1);
 			file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext4_bread(NULL, inode, map.m_lblk, 0);
-			if (IS_ERR(bh))
-				return PTR_ERR(bh);
+			if (IS_ERR(bh)) {
+				err = PTR_ERR(bh);
+				bh = NULL;
+				goto errout;
+			}
 		}
 
 		if (!bh) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0662b28..157b458 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2302,6 +2302,7 @@
 int ext4_decrypt(struct page *page);
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
 			   ext4_fsblk_t pblk, ext4_lblk_t len);
+extern const struct dentry_operations ext4_encrypted_d_ops;
 
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 int ext4_init_crypto(void);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0ffabaf..3753ceb 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3928,7 +3928,7 @@
 convert_initialized_extent(handle_t *handle, struct inode *inode,
 			   struct ext4_map_blocks *map,
 			   struct ext4_ext_path **ppath, int flags,
-			   unsigned int allocated, ext4_fsblk_t newblock)
+			   unsigned int allocated)
 {
 	struct ext4_ext_path *path = *ppath;
 	struct ext4_extent *ex;
@@ -4347,7 +4347,7 @@
 			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
 				allocated = convert_initialized_extent(
 						handle, inode, map, &path,
-						flags, allocated, newblock);
+						flags, allocated);
 				goto out2;
 			} else if (!ext4_ext_is_unwritten(ex))
 				goto out;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1126436..474f1a4 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -350,6 +350,7 @@
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct vfsmount *mnt = filp->f_path.mnt;
+	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
 	struct path path;
 	char buf[64], *cp;
 	int ret;
@@ -393,6 +394,14 @@
 		if (ext4_encryption_info(inode) == NULL)
 			return -ENOKEY;
 	}
+	if (ext4_encrypted_inode(dir) &&
+	    !ext4_is_child_context_consistent_with_parent(dir, inode)) {
+		ext4_warning(inode->i_sb,
+			     "Inconsistent encryption contexts: %lu/%lu\n",
+			     (unsigned long) dir->i_ino,
+			     (unsigned long) inode->i_ino);
+		return -EPERM;
+	}
 	/*
 	 * Set up the jbd2_inode if we are opening the inode for
 	 * writing and the journal is present
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3fcfd50..acc0ad5 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,7 +76,6 @@
 	/* If checksum is bad mark all blocks and inodes use to prevent
 	 * allocation, essentially implementing a per-group read-only flag. */
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
-		ext4_error(sb, "Checksum bad for group %u", block_group);
 		grp = ext4_get_group_info(sb, block_group);
 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
 			percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -191,8 +190,11 @@
 		set_buffer_verified(bh);
 		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
-		if (err)
+		if (err) {
+			ext4_error(sb, "Failed to init inode bitmap for group "
+				   "%u: %d", block_group, err);
 			goto out;
+		}
 		return bh;
 	}
 	ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 83bc8bf..9cc57c3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -686,6 +686,34 @@
 	return retval;
 }
 
+/*
+ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
+ * we have to be careful as someone else may be manipulating b_state as well.
+ */
+static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
+{
+	unsigned long old_state;
+	unsigned long new_state;
+
+	flags &= EXT4_MAP_FLAGS;
+
+	/* Dummy buffer_head? Set non-atomically. */
+	if (!bh->b_page) {
+		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
+		return;
+	}
+	/*
+	 * Someone else may be modifying b_state. Be careful! This is ugly but
+	 * once we get rid of using bh as a container for mapping information
+	 * to pass to / from get_block functions, this can go away.
+	 */
+	do {
+		old_state = READ_ONCE(bh->b_state);
+		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
+	} while (unlikely(
+		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+}
+
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 
@@ -722,7 +750,7 @@
 		ext4_io_end_t *io_end = ext4_inode_aio(inode);
 
 		map_bh(bh, inode->i_sb, map.m_pblk);
-		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+		ext4_update_bh_state(bh, map.m_flags);
 		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
 			set_buffer_defer_completion(bh);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -1685,7 +1713,7 @@
 		return ret;
 
 	map_bh(bh, inode->i_sb, map.m_pblk);
-	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+	ext4_update_bh_state(bh, map.m_flags);
 
 	if (buffer_unwritten(bh)) {
 		/* A delayed write to unwritten bh should be marked
@@ -3253,29 +3281,29 @@
 	 * case, we allocate an io_end structure to hook to the iocb.
 	 */
 	iocb->private = NULL;
-	ext4_inode_aio_set(inode, NULL);
-	if (!is_sync_kiocb(iocb)) {
-		io_end = ext4_init_io_end(inode, GFP_NOFS);
-		if (!io_end) {
-			ret = -ENOMEM;
-			goto retake_lock;
-		}
-		/*
-		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
-		 */
-		iocb->private = ext4_get_io_end(io_end);
-		/*
-		 * we save the io structure for current async direct
-		 * IO, so that later ext4_map_blocks() could flag the
-		 * io structure whether there is a unwritten extents
-		 * needs to be converted when IO is completed.
-		 */
-		ext4_inode_aio_set(inode, io_end);
-	}
-
 	if (overwrite) {
 		get_block_func = ext4_get_block_overwrite;
 	} else {
+		ext4_inode_aio_set(inode, NULL);
+		if (!is_sync_kiocb(iocb)) {
+			io_end = ext4_init_io_end(inode, GFP_NOFS);
+			if (!io_end) {
+				ret = -ENOMEM;
+				goto retake_lock;
+			}
+			/*
+			 * Grab reference for DIO. Will be dropped in
+			 * ext4_end_io_dio()
+			 */
+			iocb->private = ext4_get_io_end(io_end);
+			/*
+			 * We save the io structure for the current async
+			 * direct IO so that ext4_map_blocks() can later flag
+			 * whether there are unwritten extents that need to
+			 * be converted when the IO is completed.
+			 */
+			ext4_inode_aio_set(inode, io_end);
+		}
 		get_block_func = ext4_get_block_write;
 		dio_flags = DIO_LOCKING;
 	}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0f6c369..a99b010 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -208,7 +208,7 @@
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	handle_t *handle = NULL;
-	int err = EPERM, migrate = 0;
+	int err = -EPERM, migrate = 0;
 	struct ext4_iloc iloc;
 	unsigned int oldflags, mask, i;
 	unsigned int jflag;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 61eaf74..4424b7b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2285,7 +2285,7 @@
 	if (group == 0)
 		seq_puts(seq, "#group: free  frags first ["
 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
-			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]");
+			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
 
 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
 		sizeof(struct ext4_group_info);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index fb6f117..e032a04 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -265,11 +265,12 @@
 	ext4_lblk_t orig_blk_offset, donor_blk_offset;
 	unsigned long blocksize = orig_inode->i_sb->s_blocksize;
 	unsigned int tmp_data_size, data_size, replaced_size;
-	int err2, jblocks, retries = 0;
+	int i, err2, jblocks, retries = 0;
 	int replaced_count = 0;
 	int from = data_offset_in_page << orig_inode->i_blkbits;
 	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
 	struct super_block *sb = orig_inode->i_sb;
+	struct buffer_head *bh = NULL;
 
 	/*
 	 * It needs twice the amount of ordinary journal buffers because
@@ -380,8 +381,16 @@
 	}
 	/* Perform all necessary steps similar write_begin()/write_end()
 	 * but keeping in mind that i_size will not change */
-	*err = __block_write_begin(pagep[0], from, replaced_size,
-				   ext4_get_block);
+	if (!page_has_buffers(pagep[0]))
+		create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
+	bh = page_buffers(pagep[0]);
+	for (i = 0; i < data_offset_in_page; i++)
+		bh = bh->b_this_page;
+	for (i = 0; i < block_len_in_page; i++) {
+		*err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
+		if (*err < 0)
+			break;
+	}
 	if (!*err)
 		*err = block_commit_write(pagep[0], from, from + replaced_size);
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 06574dd..48e4b89 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1558,6 +1558,24 @@
 	struct ext4_dir_entry_2 *de;
 	struct buffer_head *bh;
 
+	if (ext4_encrypted_inode(dir)) {
+		int res = ext4_get_encryption_info(dir);
+
+		/*
+		 * This should be a properly defined flag for
+		 * dentry->d_flags when we uplift this to the VFS.
+		 * d_fsdata is set to (void *) 1 if the dentry is
+		 * created while the directory was encrypted and we
+		 * don't have access to the key.
+		 */
+		dentry->d_fsdata = NULL;
+		if (ext4_encryption_info(dir))
+			dentry->d_fsdata = (void *) 1;
+		d_set_d_op(dentry, &ext4_encrypted_d_ops);
+		if (res && res != -ENOKEY)
+			return ERR_PTR(res);
+	}
+
 	if (dentry->d_name.len > EXT4_NAME_LEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
@@ -1585,11 +1603,15 @@
 			return ERR_PTR(-EFSCORRUPTED);
 		}
 		if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
-		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-		     S_ISLNK(inode->i_mode)) &&
+		    (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
 		    !ext4_is_child_context_consistent_with_parent(dir,
 								  inode)) {
+			int nokey = ext4_encrypted_inode(inode) &&
+				!ext4_encryption_info(inode);
+
 			iput(inode);
+			if (nokey)
+				return ERR_PTR(-ENOKEY);
 			ext4_warning(inode->i_sb,
 				     "Inconsistent encryption contexts: %lu/%lu\n",
 				     (unsigned long) dir->i_ino,
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ad62d7a..34038e3 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -198,7 +198,7 @@
 	if (flex_gd == NULL)
 		goto out3;
 
-	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
 		goto out2;
 	flex_gd->count = flexbg_size;
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6915c95..1f76d89 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -317,6 +317,7 @@
 	struct inode_switch_wbs_context *isw =
 		container_of(work, struct inode_switch_wbs_context, work);
 	struct inode *inode = isw->inode;
+	struct super_block *sb = inode->i_sb;
 	struct address_space *mapping = inode->i_mapping;
 	struct bdi_writeback *old_wb = inode->i_wb;
 	struct bdi_writeback *new_wb = isw->new_wb;
@@ -423,6 +424,7 @@
 	wb_put(new_wb);
 
 	iput(inode);
+	deactivate_super(sb);
 	kfree(isw);
 }
 
@@ -469,11 +471,14 @@
 
 	/* while holding I_WB_SWITCH, no one else can update the association */
 	spin_lock(&inode->i_lock);
+
 	if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
-	    inode_to_wb(inode) == isw->new_wb) {
-		spin_unlock(&inode->i_lock);
-		goto out_free;
-	}
+	    inode_to_wb(inode) == isw->new_wb)
+		goto out_unlock;
+
+	if (!atomic_inc_not_zero(&inode->i_sb->s_active))
+		goto out_unlock;
+
 	inode->i_state |= I_WB_SWITCH;
 	spin_unlock(&inode->i_lock);
 
@@ -489,6 +494,8 @@
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
 	return;
 
+out_unlock:
+	spin_unlock(&inode->i_lock);
 out_free:
 	if (isw->new_wb)
 		wb_put(isw->new_wb);
diff --git a/fs/inode.c b/fs/inode.c
index 9f62db3..69b8b52 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -154,6 +154,12 @@
 	inode->i_rdev = 0;
 	inode->dirtied_when = 0;
 
+#ifdef CONFIG_CGROUP_WRITEBACK
+	inode->i_wb_frn_winner = 0;
+	inode->i_wb_frn_avg_time = 0;
+	inode->i_wb_frn_history = 0;
+#endif
+
 	if (security_inode_alloc(inode))
 		goto out;
 	spin_lock_init(&inode->i_lock);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 5bcd92d..0cb1abd 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1215,7 +1215,7 @@
 					hdr->pgio_mirror_idx + 1,
 					&hdr->pgio_mirror_idx))
 			goto out_eagain;
-		set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+		set_bit(NFS_LAYOUT_RETURN_REQUESTED,
 			&hdr->lseg->pls_layout->plh_flags);
 		pnfs_read_resend_pnfs(hdr);
 		return task->tk_status;
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 29898a9..eb37046 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -412,7 +412,7 @@
 					 OP_ILLEGAL, GFP_NOIO);
 		if (!fail_return) {
 			if (ff_layout_has_available_ds(lseg))
-				set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+				set_bit(NFS_LAYOUT_RETURN_REQUESTED,
 					&lseg->pls_layout->plh_flags);
 			else
 				pnfs_error_mark_layout_for_return(ino, lseg);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index a3592cc..482b6e9 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -52,9 +52,7 @@
  */
 static LIST_HEAD(pnfs_modules_tbl);
 
-static int
-pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
-		       enum pnfs_iomode iomode, bool sync);
+static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
 
 /* Return the registered pnfs layout driver module matching given id */
 static struct pnfs_layoutdriver_type *
@@ -243,6 +241,8 @@
 {
 	struct inode *inode = lo->plh_inode;
 
+	pnfs_layoutreturn_before_put_layout_hdr(lo);
+
 	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
 		if (!list_empty(&lo->plh_segs))
 			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
@@ -345,58 +345,6 @@
 	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
 }
 
-/* Return true if layoutreturn is needed */
-static bool
-pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
-			struct pnfs_layout_segment *lseg)
-{
-	struct pnfs_layout_segment *s;
-
-	if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
-		return false;
-
-	list_for_each_entry(s, &lo->plh_segs, pls_list)
-		if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
-			return false;
-
-	return true;
-}
-
-static bool
-pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
-{
-	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-		return false;
-	lo->plh_return_iomode = 0;
-	pnfs_get_layout_hdr(lo);
-	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
-	return true;
-}
-
-static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
-		struct pnfs_layout_hdr *lo, struct inode *inode)
-{
-	lo = lseg->pls_layout;
-	inode = lo->plh_inode;
-
-	spin_lock(&inode->i_lock);
-	if (pnfs_layout_need_return(lo, lseg)) {
-		nfs4_stateid stateid;
-		enum pnfs_iomode iomode;
-		bool send;
-
-		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
-		iomode = lo->plh_return_iomode;
-		send = pnfs_prepare_layoutreturn(lo);
-		spin_unlock(&inode->i_lock);
-		if (send) {
-			/* Send an async layoutreturn so we dont deadlock */
-			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
-		}
-	} else
-		spin_unlock(&inode->i_lock);
-}
-
 void
 pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 {
@@ -410,15 +358,8 @@
 		atomic_read(&lseg->pls_refcount),
 		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 
-	/* Handle the case where refcount != 1 */
-	if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
-		return;
-
 	lo = lseg->pls_layout;
 	inode = lo->plh_inode;
-	/* Do we need a layoutreturn? */
-	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
-		pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
 
 	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
 		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
@@ -937,6 +878,17 @@
 	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
+static bool
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+{
+	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+		return false;
+	lo->plh_return_iomode = 0;
+	pnfs_get_layout_hdr(lo);
+	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+	return true;
+}
+
 static int
 pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
 		       enum pnfs_iomode iomode, bool sync)
@@ -971,6 +923,48 @@
 	return status;
 }
 
+/* Return true if layoutreturn is needed */
+static bool
+pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
+{
+	struct pnfs_layout_segment *s;
+
+	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+		return false;
+
+	/* Defer layoutreturn until all lsegs are done */
+	list_for_each_entry(s, &lo->plh_segs, pls_list) {
+		if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
+			return false;
+	}
+
+	return true;
+}
+
+static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	struct inode *inode = lo->plh_inode;
+
+	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+		return;
+	spin_lock(&inode->i_lock);
+	if (pnfs_layout_need_return(lo)) {
+		nfs4_stateid stateid;
+		enum pnfs_iomode iomode;
+		bool send;
+
+		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
+		iomode = lo->plh_return_iomode;
+		send = pnfs_prepare_layoutreturn(lo);
+		spin_unlock(&inode->i_lock);
+		if (send) {
+			/* Send an async layoutreturn so we don't deadlock */
+			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
+		}
+	} else
+		spin_unlock(&inode->i_lock);
+}
+
 /*
  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
  * when the layout segment list is empty.
@@ -1091,7 +1085,7 @@
 
 	nfs4_stateid_copy(&stateid, &lo->plh_stateid);
 	/* always send layoutreturn if being marked so */
-	if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+	if (test_and_clear_bit(NFS_LAYOUT_RETURN_REQUESTED,
 				   &lo->plh_flags))
 		layoutreturn = pnfs_prepare_layoutreturn(lo);
 
@@ -1772,7 +1766,7 @@
 			pnfs_set_plh_return_iomode(lo, return_range->iomode);
 			if (!mark_lseg_invalid(lseg, tmp_list))
 				remaining++;
-			set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+			set_bit(NFS_LAYOUT_RETURN_REQUESTED,
 					&lo->plh_flags);
 		}
 	return remaining;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 9f4e2a4..1ac1db5 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -94,8 +94,8 @@
 	NFS_LAYOUT_RO_FAILED = 0,	/* get ro layout failed stop trying */
 	NFS_LAYOUT_RW_FAILED,		/* get rw layout failed stop trying */
 	NFS_LAYOUT_BULK_RECALL,		/* bulk recall affecting layout */
-	NFS_LAYOUT_RETURN,		/* Return this layout ASAP */
-	NFS_LAYOUT_RETURN_BEFORE_CLOSE,	/* Return this layout before close */
+	NFS_LAYOUT_RETURN,		/* layoutreturn in progress */
+	NFS_LAYOUT_RETURN_REQUESTED,	/* Return this layout ASAP */
 	NFS_LAYOUT_INVALID_STID,	/* layout stateid id is invalid */
 	NFS_LAYOUT_FIRST_LAYOUTGET,	/* Serialize first layoutget */
 };
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index cfcbf11..7115c5d 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -91,7 +91,14 @@
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
 
+#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */
+
 struct srcu_struct fsnotify_mark_srcu;
+static DEFINE_SPINLOCK(destroy_lock);
+static LIST_HEAD(destroy_list);
+
+static void fsnotify_mark_destroy(struct work_struct *work);
+static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
@@ -165,19 +172,10 @@
 	atomic_dec(&group->num_marks);
 }
 
-static void
-fsnotify_mark_free_rcu(struct rcu_head *rcu)
-{
-	struct fsnotify_mark	*mark;
-
-	mark = container_of(rcu, struct fsnotify_mark, g_rcu);
-	fsnotify_put_mark(mark);
-}
-
 /*
- * Free fsnotify mark. The freeing is actually happening from a call_srcu
- * callback. Caller must have a reference to the mark or be protected by
- * fsnotify_mark_srcu.
+ * Free fsnotify mark. The freeing is actually happening from a workqueue which
+ * first waits for srcu period end. Caller must have a reference to the mark
+ * or be protected by fsnotify_mark_srcu.
  */
 void fsnotify_free_mark(struct fsnotify_mark *mark)
 {
@@ -192,7 +190,11 @@
 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
 	spin_unlock(&mark->lock);
 
-	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+	spin_lock(&destroy_lock);
+	list_add(&mark->g_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	queue_delayed_work(system_unbound_wq, &reaper_work,
+				FSNOTIFY_REAPER_DELAY);
 
 	/*
 	 * Some groups like to know that marks are being freed.  This is a
@@ -388,7 +390,12 @@
 
 	spin_unlock(&mark->lock);
 
-	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+	spin_lock(&destroy_lock);
+	list_add(&mark->g_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	queue_delayed_work(system_unbound_wq, &reaper_work,
+				FSNOTIFY_REAPER_DELAY);
+
 	return ret;
 }
 
@@ -491,3 +498,21 @@
 	atomic_set(&mark->refcnt, 1);
 	mark->free_mark = free_mark;
 }
+
+static void fsnotify_mark_destroy(struct work_struct *work)
+{
+	struct fsnotify_mark *mark, *next;
+	struct list_head private_destroy_list;
+
+	spin_lock(&destroy_lock);
+	/* exchange the list head */
+	list_replace_init(&destroy_list, &private_destroy_list);
+	spin_unlock(&destroy_lock);
+
+	synchronize_srcu(&fsnotify_mark_srcu);
+
+	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
+		list_del_init(&mark->g_list);
+		fsnotify_put_mark(mark);
+	}
+}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a3cc6d2..a76b9ea 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1254,15 +1254,15 @@
 
 void o2hb_exit(void)
 {
-	kfree(o2hb_db_livenodes);
-	kfree(o2hb_db_liveregions);
-	kfree(o2hb_db_quorumregions);
-	kfree(o2hb_db_failedregions);
 	debugfs_remove(o2hb_debug_failedregions);
 	debugfs_remove(o2hb_debug_quorumregions);
 	debugfs_remove(o2hb_debug_liveregions);
 	debugfs_remove(o2hb_debug_livenodes);
 	debugfs_remove(o2hb_debug_dir);
+	kfree(o2hb_db_livenodes);
+	kfree(o2hb_db_liveregions);
+	kfree(o2hb_db_quorumregions);
+	kfree(o2hb_db_failedregions);
 }
 
 static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
@@ -1438,13 +1438,15 @@
 
 	kfree(reg->hr_slots);
 
-	kfree(reg->hr_db_regnum);
-	kfree(reg->hr_db_livenodes);
 	debugfs_remove(reg->hr_debug_livenodes);
 	debugfs_remove(reg->hr_debug_regnum);
 	debugfs_remove(reg->hr_debug_elapsed_time);
 	debugfs_remove(reg->hr_debug_pinned);
 	debugfs_remove(reg->hr_debug_dir);
+	kfree(reg->hr_db_livenodes);
+	kfree(reg->hr_db_regnum);
+	kfree(reg->hr_debug_elapsed_time);
+	kfree(reg->hr_debug_pinned);
 
 	spin_lock(&o2hb_live_lock);
 	list_del(&reg->hr_all_item);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index c5bdf02..b94a425 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2367,6 +2367,8 @@
 						break;
 					}
 				}
+				dlm_lockres_clear_refmap_bit(dlm, res,
+						dead_node);
 				spin_unlock(&res->spinlock);
 				continue;
 			}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 85d16c6..fa95ab2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -259,23 +259,29 @@
 				sizeof(struct proc_maps_private));
 }
 
-static pid_t pid_of_stack(struct proc_maps_private *priv,
-				struct vm_area_struct *vma, bool is_pid)
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static int is_stack(struct proc_maps_private *priv,
+		    struct vm_area_struct *vma, int is_pid)
 {
-	struct inode *inode = priv->inode;
-	struct task_struct *task;
-	pid_t ret = 0;
+	int stack = 0;
 
-	rcu_read_lock();
-	task = pid_task(proc_pid(inode), PIDTYPE_PID);
-	if (task) {
-		task = task_of_stack(task, vma, is_pid);
+	if (is_pid) {
+		stack = vma->vm_start <= vma->vm_mm->start_stack &&
+			vma->vm_end >= vma->vm_mm->start_stack;
+	} else {
+		struct inode *inode = priv->inode;
+		struct task_struct *task;
+
+		rcu_read_lock();
+		task = pid_task(proc_pid(inode), PIDTYPE_PID);
 		if (task)
-			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+			stack = vma_is_stack_for_task(vma, task);
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();
-
-	return ret;
+	return stack;
 }
 
 static void
@@ -335,8 +341,6 @@
 
 	name = arch_vma_name(vma);
 	if (!name) {
-		pid_t tid;
-
 		if (!mm) {
 			name = "[vdso]";
 			goto done;
@@ -348,21 +352,8 @@
 			goto done;
 		}
 
-		tid = pid_of_stack(priv, vma, is_pid);
-		if (tid != 0) {
-			/*
-			 * Thread stack in /proc/PID/task/TID/maps or
-			 * the main process stack.
-			 */
-			if (!is_pid || (vma->vm_start <= mm->start_stack &&
-			    vma->vm_end >= mm->start_stack)) {
-				name = "[stack]";
-			} else {
-				/* Thread stack in /proc/PID/maps */
-				seq_pad(m, ' ');
-				seq_printf(m, "[stack:%d]", tid);
-			}
-		}
+		if (is_stack(priv, vma, is_pid))
+			name = "[stack]";
 	}
 
 done:
@@ -1552,18 +1543,19 @@
 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
 		unsigned long addr, unsigned long end, struct mm_walk *walk)
 {
+	pte_t huge_pte = huge_ptep_get(pte);
 	struct numa_maps *md;
 	struct page *page;
 
-	if (!pte_present(*pte))
+	if (!pte_present(huge_pte))
 		return 0;
 
-	page = pte_page(*pte);
+	page = pte_page(huge_pte);
 	if (!page)
 		return 0;
 
 	md = walk->private;
-	gather_stats(page, md, pte_dirty(*pte), 1);
+	gather_stats(page, md, pte_dirty(huge_pte), 1);
 	return 0;
 }
 
@@ -1617,19 +1609,8 @@
 		seq_file_path(m, file, "\n\t= ");
 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
 		seq_puts(m, " heap");
-	} else {
-		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
-		if (tid != 0) {
-			/*
-			 * Thread stack in /proc/PID/task/TID/maps or
-			 * the main process stack.
-			 */
-			if (!is_pid || (vma->vm_start <= mm->start_stack &&
-			    vma->vm_end >= mm->start_stack))
-				seq_puts(m, " stack");
-			else
-				seq_printf(m, " stack:%d", tid);
-		}
+	} else if (is_stack(proc_priv, vma, is_pid)) {
+		seq_puts(m, " stack");
 	}
 
 	if (is_vm_hugetlb_page(vma))
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index e0d64c9..faacb0c 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -123,23 +123,26 @@
 	return size;
 }
 
-static pid_t pid_of_stack(struct proc_maps_private *priv,
-				struct vm_area_struct *vma, bool is_pid)
+static int is_stack(struct proc_maps_private *priv,
+		    struct vm_area_struct *vma, int is_pid)
 {
-	struct inode *inode = priv->inode;
-	struct task_struct *task;
-	pid_t ret = 0;
+	struct mm_struct *mm = vma->vm_mm;
+	int stack = 0;
 
-	rcu_read_lock();
-	task = pid_task(proc_pid(inode), PIDTYPE_PID);
-	if (task) {
-		task = task_of_stack(task, vma, is_pid);
+	if (is_pid) {
+		stack = vma->vm_start <= mm->start_stack &&
+			vma->vm_end >= mm->start_stack;
+	} else {
+		struct inode *inode = priv->inode;
+		struct task_struct *task;
+
+		rcu_read_lock();
+		task = pid_task(proc_pid(inode), PIDTYPE_PID);
 		if (task)
-			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+			stack = vma_is_stack_for_task(vma, task);
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();
-
-	return ret;
+	return stack;
 }
 
 /*
@@ -181,21 +184,9 @@
 	if (file) {
 		seq_pad(m, ' ');
 		seq_file_path(m, file, "");
-	} else if (mm) {
-		pid_t tid = pid_of_stack(priv, vma, is_pid);
-
-		if (tid != 0) {
-			seq_pad(m, ' ');
-			/*
-			 * Thread stack in /proc/PID/task/TID/maps or
-			 * the main process stack.
-			 */
-			if (!is_pid || (vma->vm_start <= mm->start_stack &&
-			    vma->vm_end >= mm->start_stack))
-				seq_printf(m, "[stack]");
-			else
-				seq_printf(m, "[stack:%d]", tid);
-		}
+	} else if (mm && is_stack(priv, vma, is_pid)) {
+		seq_pad(m, ' ');
+		seq_printf(m, "[stack]");
 	}
 
 	seq_putc(m, '\n');
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index da37beb..594f7e6 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -4491,7 +4491,7 @@
 	 * know precisely what failed.
 	 */
 	if (pass == XLOG_RECOVER_CRCPASS) {
-		if (rhead->h_crc && crc != le32_to_cpu(rhead->h_crc))
+		if (rhead->h_crc && crc != rhead->h_crc)
 			return -EFSBADCRC;
 		return 0;
 	}
@@ -4502,7 +4502,7 @@
 	 * zero CRC check prevents warnings from being emitted when upgrading
 	 * the kernel from one that does not add CRCs by default.
 	 */
-	if (crc != le32_to_cpu(rhead->h_crc)) {
+	if (crc != rhead->h_crc) {
 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
 			xfs_alert(log->l_mp,
 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 717a298..dad8af3 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -133,6 +133,5 @@
 /* Methods to interact with the PCC mailbox controller. */
 extern struct mbox_chan *
 	pcc_mbox_request_channel(struct mbox_client *, unsigned int);
-extern int mbox_send_message(struct mbox_chan *chan, void *mssg);
 
 #endif /* _CPPC_ACPI_H*/
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0419485..0f1c6f3 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -75,7 +75,7 @@
  */
 static inline cputime_t timespec_to_cputime(const struct timespec *val)
 {
-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
 	return (__force cputime_t) ret;
 }
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@
  */
 static inline cputime_t timeval_to_cputime(const struct timeval *val)
 {
-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+			val->tv_usec * NSEC_PER_USEC;
 	return (__force cputime_t) ret;
 }
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 0b3c0d3..c370b26 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -239,6 +239,14 @@
 			    pmd_t *pmdp);
 #endif
 
+#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+					   unsigned long address, pmd_t *pmdp)
+{
+
+}
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 {
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 7bfb063..461a055 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,4 +35,13 @@
 
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 
+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+	return false;
+#else
+	return true;
+#endif
+}
+
 #endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c65a212..c5b4b81 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1166,6 +1166,7 @@
 	struct drm_mode_object base;
 
 	char *name;
+	int connector_id;
 	int connector_type;
 	int connector_type_id;
 	bool interlace_allowed;
@@ -2047,6 +2048,7 @@
 	struct list_head fb_list;
 
 	int num_connector;
+	struct ida connector_ida;
 	struct list_head connector_list;
 	int num_encoder;
 	struct list_head encoder_list;
@@ -2200,7 +2202,11 @@
 void drm_connector_unregister(struct drm_connector *connector);
 
 extern void drm_connector_cleanup(struct drm_connector *connector);
-extern unsigned int drm_connector_index(struct drm_connector *connector);
+static inline unsigned drm_connector_index(struct drm_connector *connector)
+{
+	return connector->connector_id;
+}
+
 /* helper to unplug all connectors from sysfs for device */
 extern void drm_connector_unplug_all(struct drm_device *dev);
 
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 24ab178..fdb4705 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -44,8 +44,6 @@
 /**
  * struct drm_dp_mst_port - MST port
  * @kref: reference count for this port.
- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
- * @guid: guid for DP 1.2 device on this port.
  * @port_num: port number
  * @input: if this port is an input port.
  * @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@
 struct drm_dp_mst_port {
 	struct kref kref;
 
-	/* if dpcd 1.2 device is on this port - its GUID info */
-	bool guid_valid;
-	u8 guid[16];
-
 	u8 port_num;
 	bool input;
 	bool mcs;
@@ -110,10 +104,12 @@
  * @tx_slots: transmission slots for this device.
  * @last_seqno: last sequence number used to talk to this.
  * @link_address_sent: if a link address message has been sent to this device yet.
+ * @guid: guid for the DP 1.2 branch device. Ports under this branch can
+ * be identified by port number.
  *
  * This structure represents an MST branch device, there is one
- * primary branch device at the root, along with any others connected
- * to downstream ports
+ * primary branch device at the root, along with any other branches connected
+ * to the downstream ports of parent branches.
  */
 struct drm_dp_mst_branch {
 	struct kref kref;
@@ -132,6 +128,9 @@
 	struct drm_dp_sideband_msg_tx *tx_slots[2];
 	int last_seqno;
 	bool link_address_sent;
+
+	/* global unique identifier to identify branch devices */
+	u8 guid[16];
 };
 
 
@@ -406,11 +405,9 @@
  * @conn_base_id: DRM connector ID this mgr is connected to.
  * @down_rep_recv: msg receiver state for down replies.
  * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, guid, dpcd.
+ * @lock: protects mst state, primary, dpcd.
  * @mst_state: if this manager is enabled for an MST capable port.
  * @mst_primary: pointer to the primary branch device.
- * @guid_valid: GUID valid for the primary branch device.
- * @guid: GUID for primary port.
  * @dpcd: cache of DPCD for primary port.
  * @pbn_div: PBN to slots divisor.
  *
@@ -432,13 +429,11 @@
 	struct drm_dp_sideband_msg_rx up_req_recv;
 
 	/* pointer to info about the initial MST device */
-	struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+	struct mutex lock; /* protects mst_state + primary + dpcd */
 
 	bool mst_state;
 	struct drm_dp_mst_branch *mst_primary;
-	/* primary MST device GUID */
-	bool guid_valid;
-	u8 guid[16];
+
 	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 	u8 sink_count;
 	int pbn_div;
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index d639049..553210c 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -73,18 +73,28 @@
 #define DRM_FIXED_ONE		(1ULL << DRM_FIXED_POINT)
 #define DRM_FIXED_DECIMAL_MASK	(DRM_FIXED_ONE - 1)
 #define DRM_FIXED_DIGITS_MASK	(~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON	1LL
+#define DRM_FIXED_ALMOST_ONE	(DRM_FIXED_ONE - DRM_FIXED_EPSILON)
 
 static inline s64 drm_int2fixp(int a)
 {
 	return ((s64)a) << DRM_FIXED_POINT;
 }
 
-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
 {
 	return ((s64)a) >> DRM_FIXED_POINT;
 }
 
-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+	if (a > 0)
+		return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+	else
+		return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
 {
 	unsigned shift, sign = (a >> 63) & 1;
 
@@ -136,6 +146,45 @@
 	return result;
 }
 
+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+	s64 res;
+	bool a_neg = a < 0;
+	bool b_neg = b < 0;
+	u64 a_abs = a_neg ? -a : a;
+	u64 b_abs = b_neg ? -b : b;
+	u64 rem;
+
+	/* determine integer part */
+	u64 res_abs  = div64_u64_rem(a_abs, b_abs, &rem);
+
+	/* determine fractional part */
+	{
+		u32 i = DRM_FIXED_POINT;
+
+		do {
+			rem <<= 1;
+			res_abs <<= 1;
+			if (rem >= b_abs) {
+				res_abs |= 1;
+				rem -= b_abs;
+			}
+		} while (--i != 0);
+	}
+
+	/* round up LSB */
+	{
+		u64 summand = (rem << 1) >= b_abs;
+
+		res_abs += summand;
+	}
+
+	res = (s64) res_abs;
+	if (a_neg ^ b_neg)
+		res = -res;
+	return res;
+}
+
 static inline s64 drm_fixp_exp(s64 x)
 {
 	s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6f45aea..0a05b0d 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -126,7 +126,7 @@
 /* 104 */
 /* 105 */
 #define TEGRA210_CLK_D_AUDIO 106
-/* 107 ( affects abp -> ape) */
+#define TEGRA210_CLK_APB2APE 107
 /* 108 */
 /* 109 */
 /* 110 */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 29189ae..4571ef1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -682,9 +682,12 @@
 /*
  * q->prep_rq_fn return values
  */
-#define BLKPREP_OK		0	/* serve it */
-#define BLKPREP_KILL		1	/* fatal error, kill */
-#define BLKPREP_DEFER		2	/* leave on queue */
+enum {
+	BLKPREP_OK,		/* serve it */
+	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
+	BLKPREP_DEFER,		/* leave on queue */
+	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
+};
 
 extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index f89b31d..c1ef6f1 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -63,6 +63,18 @@
 #define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
 // duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
 #define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49)  /* overlap w/ above */
+#define CEPH_FEATURE_MON_METADATA (1ULL<<50)
+#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */
+#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES (1ULL<<52)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53)
+#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54)
+#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55)
+#define CEPH_FEATURE_NEW_OSDOP_ENCODING   (1ULL<<56) /* New, v7 encoding */
+#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */
+#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */
+#define CEPH_FEATURE_CRUSH_TUNABLES5	(1ULL<<58) /* chooseleaf stable mode */
+// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
+#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING   (1ULL<<58) /* New, v7 encoding */
 
 /*
  * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -108,7 +120,9 @@
 	 CEPH_FEATURE_CRUSH_TUNABLES3 |		\
 	 CEPH_FEATURE_OSD_PRIMARY_AFFINITY |	\
 	 CEPH_FEATURE_MSGR_KEEPALIVE2 |		\
-	 CEPH_FEATURE_CRUSH_V4)
+	 CEPH_FEATURE_CRUSH_V4 |		\
+	 CEPH_FEATURE_CRUSH_TUNABLES5 |		\
+	 CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT   \
 	(CEPH_FEATURE_NOSRCADDR |	 \
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 7f540f7..789471d 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -127,6 +127,12 @@
 	 */
 	u64 serial_nr;
 
+	/*
+	 * Incremented by online self and children.  Used to guarantee that
+	 * parents are not offlined before their children.
+	 */
+	atomic_t online_cnt;
+
 	/* percpu_ref killing and RCU release */
 	struct rcu_head rcu_head;
 	struct work_struct destroy_work;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 00b042c..48f5aab 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,7 +144,7 @@
  */
 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
 #define __trace_if(cond) \
-	if (__builtin_constant_p((cond)) ? !!(cond) :			\
+	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
 	({								\
 		int ______r;						\
 		static struct ftrace_branch_data			\
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868c..fea160e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,6 +137,8 @@
 	task_unlock(current);
 }
 
+extern void cpuset_post_attach_flush(void);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@
 	return false;
 }
 
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 48b4930..be8f12b 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -59,7 +59,8 @@
 	CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
 	CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
 	CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
-	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
+	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
+	CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
 };
 
 /*
@@ -205,6 +206,11 @@
 	 * mappings line up a bit better with previous mappings. */
 	__u8 chooseleaf_vary_r;
 
+	/* if true, chooseleaf firstn returns stable results (if there is
+	 * no local retry) so that data migration is optimal when some
+	 * device fails. */
+	__u8 chooseleaf_stable;
+
 #ifndef __KERNEL__
 	/*
 	 * version 0 (original) of straw_calc has various flaws.  version 1
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 251a209..e0ee0b3 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,6 +19,8 @@
 
 int devpts_new_index(struct inode *ptmx_inode);
 void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
 /* mknod in devpts */
 struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
 		void *priv);
@@ -32,6 +34,8 @@
 /* Dummy stubs in the no-pty case */
 static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
 static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
 static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
 		dev_t device, int index, void *priv)
 {
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 3c6cbbd..1acd723 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1203,7 +1203,10 @@
 struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
 				       struct list_head *head, bool remove);
 
-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+		     unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+				  size_t len);
 
 extern struct work_struct efivar_work;
 void efivar_run_worker(void);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6b7e89f..533c440 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -220,10 +220,7 @@
 	/* List of marks by group->i_fsnotify_marks. Also reused for queueing
 	 * mark into destroy_list when it's waiting for the end of SRCU period
 	 * before it can be freed. [group->mark_mutex] */
-	union {
-		struct list_head g_list;
-		struct rcu_head g_rcu;
-	};
+	struct list_head g_list;
 	/* Protects inode / mnt pointers, flags, masks */
 	spinlock_t lock;
 	/* List of marks for inode / vfsmount [obj_lock] */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 81de712..c2b340e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -603,6 +603,7 @@
 
 extern int skip_trace(unsigned long ip);
 extern void ftrace_module_init(struct module *mod);
+extern void ftrace_module_enable(struct module *mod);
 extern void ftrace_release_mod(struct module *mod);
 
 extern void ftrace_disable_daemon(void);
@@ -612,8 +613,9 @@
 static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
-static inline void ftrace_release_mod(struct module *mod) {}
-static inline void ftrace_module_init(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) { }
+static inline void ftrace_module_enable(struct module *mod) { }
+static inline void ftrace_release_mod(struct module *mod) { }
 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
 	return -EINVAL;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 28ad5f6..af1f2b2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -547,16 +547,16 @@
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_CMA
-
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
 			      unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
 
+#ifdef CONFIG_CMA
 /* CMA stuff */
 extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 821273c..2d9b6500 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -235,6 +235,9 @@
 /* low 64 bit */
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
 
+/* PRS_REG */
+#define DMA_PRS_PPR	((u32)1)
+
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
 do {									\
 	cycles_t start_time = get_cycles();				\
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 851821b..bec2abb 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -526,6 +526,7 @@
 enum ata_lpm_hints {
 	ATA_LPM_EMPTY		= (1 << 0), /* port empty/probing */
 	ATA_LPM_HIPM		= (1 << 1), /* may use HIPM */
+	ATA_LPM_WAKE_ONLY	= (1 << 2), /* only wake up link */
 };
 
 /* forward declarations */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d675011..2190419 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -135,6 +135,10 @@
 	/* Memory types */
 	NVM_ID_FMTYPE_SLC	= 0,
 	NVM_ID_FMTYPE_MLC	= 1,
+
+	/* Device capabilities */
+	NVM_ID_DCAP_BBLKMGMT	= 0x1,
+	NVM_UD_DCAP_ECC		= 0x2,
 };
 
 struct nvm_id_lp_mlc {
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c57e424..4dca42f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,7 +66,7 @@
 	/*
 	 * class-hash:
 	 */
-	struct list_head		hash_entry;
+	struct hlist_node		hash_entry;
 
 	/*
 	 * global list of all lock-classes:
@@ -199,7 +199,7 @@
 	u8				irq_context;
 	u8				depth;
 	u16				base;
-	struct list_head		entry;
+	struct hlist_node		entry;
 	u64				chain_key;
 };
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9ae48d4..792c898 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -51,7 +51,7 @@
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
 	/* default hierarchy stats */
-	MEMCG_SOCK,
+	MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
 	MEMCG_NR_STAT,
 };
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f1cd22f..516e149 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -201,11 +201,13 @@
 #endif
 
 #ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK	VM_GROWSUP
 #else
-#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK	VM_GROWSDOWN
 #endif
 
+#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
@@ -1341,8 +1343,7 @@
 		!vma_growsup(vma->vm_next, addr);
 }
 
-extern struct task_struct *task_of_stack(struct task_struct *task,
-				struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d3ebb9d..624b78b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -424,9 +424,9 @@
 	unsigned long total_vm;		/* Total pages mapped */
 	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
 	unsigned long pinned_vm;	/* Refcount permanently increased */
-	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED/GROWSDOWN */
-	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
-	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
+	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+	unsigned long stack_vm;		/* VM_STACK */
 	unsigned long def_flags;
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 33bb1b1..7b6c2cf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -682,6 +682,12 @@
 	 */
 	unsigned long first_deferred_pfn;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	spinlock_t split_queue_lock;
+	struct list_head split_queue;
+	unsigned long split_queue_len;
+#endif
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
diff --git a/include/linux/module.h b/include/linux/module.h
index 4560d8f..2bb0c30 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -324,6 +324,12 @@
 #define __module_layout_align
 #endif
 
+struct mod_kallsyms {
+	Elf_Sym *symtab;
+	unsigned int num_symtab;
+	char *strtab;
+};
+
 struct module {
 	enum module_state state;
 
@@ -405,15 +411,10 @@
 #endif
 
 #ifdef CONFIG_KALLSYMS
-	/*
-	 * We keep the symbol and string tables for kallsyms.
-	 * The core_* fields below are temporary, loader-only (they
-	 * could really be discarded after module init).
-	 */
-	Elf_Sym *symtab, *core_symtab;
-	unsigned int num_symtab, core_num_syms;
-	char *strtab, *core_strtab;
-
+	/* Protected by RCU and/or module_mutex: use rcu_dereference() */
+	struct mod_kallsyms *kallsyms;
+	struct mod_kallsyms core_kallsyms;
+
 	/* Section attributes */
 	struct module_sect_attrs *sect_attrs;
 
diff --git a/include/linux/of.h b/include/linux/of.h
index dd10626..dc6e396 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -929,7 +929,7 @@
 	return num;
 }
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && !defined(MODULE)
 #define _OF_DECLARE(table, name, compat, fn, fn_type)			\
 	static const struct of_device_id __of_table_##name		\
 		__used __section(__##table##_of_table)			\
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 2d8e497..1132953 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -10,7 +10,7 @@
  * backing is indicated by flags in the high bits of the value.
  */
 typedef struct {
-	unsigned long val;
+	u64 val;
 } pfn_t;
 #endif
 
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 37448ab..9499481 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -9,14 +9,13 @@
  * PFN_DEV - pfn is not covered by system memmap by default
  * PFN_MAP - pfn has a dynamic page mapping established by a device driver
  */
-#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \
-		<< (BITS_PER_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1))
-#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2))
-#define PFN_DEV (1UL << (BITS_PER_LONG - 3))
-#define PFN_MAP (1UL << (BITS_PER_LONG - 4))
+#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
+#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
+#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
+#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
+#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
 
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags)
+static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
 {
 	pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
 
@@ -29,7 +28,7 @@
 	return __pfn_to_pfn_t(pfn, 0);
 }
 
-extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);
 
 static inline bool pfn_t_has_page(pfn_t pfn)
 {
@@ -87,7 +86,7 @@
 #ifdef __HAVE_ARCH_PTE_DEVMAP
 static inline bool pfn_t_devmap(pfn_t pfn)
 {
-	const unsigned long flags = PFN_DEV|PFN_MAP;
+	const u64 flags = PFN_DEV|PFN_MAP;
 
 	return (pfn.val & flags) == flags;
 }
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 7c88ad1..f54be70 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -379,12 +379,28 @@
 			     struct radix_tree_iter *iter, unsigned flags);
 
 /**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter:	iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true.  If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+	iter->next_index = iter->index;
+	return NULL;
+}
+
+/**
  * radix_tree_chunk_size - get current chunk size
  *
  * @iter:	pointer to radix tree iterator
  * Returns:	current chunk size
  */
-static __always_inline unsigned
+static __always_inline long
 radix_tree_chunk_size(struct radix_tree_iter *iter)
 {
 	return iter->next_index - iter->index;
@@ -418,9 +434,9 @@
 			return slot + offset + 1;
 		}
 	} else {
-		unsigned size = radix_tree_chunk_size(iter) - 1;
+		long size = radix_tree_chunk_size(iter);
 
-		while (size--) {
+		while (--size > 0) {
 			slot++;
 			iter->index++;
 			if (likely(*slot))
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index a7a06d1..a0118d5 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -152,6 +152,8 @@
 
 # define jiffies	raid6_jiffies()
 # define printk 	printf
+# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
+# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
 # define GFP_KERNEL	0
 # define __get_free_pages(x, y)	((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
 						     PROT_READ|PROT_WRITE,   \
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bdf597c..a07f42b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -109,20 +109,6 @@
 		__put_anon_vma(anon_vma);
 }
 
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
 	down_write(&anon_vma->root->rwsem);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 11f935c..4ce9ff7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -299,6 +299,7 @@
 #else
 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
+extern int sysctl_max_skb_frags;
 
 typedef struct skb_frag_struct skb_frag_t;
 
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index acd522a..acfdbf3 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
  * See the file COPYING for more details.
  */
 
+#include <linux/smp.h>
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/cpumask.h>
 #include <linux/rcupdate.h>
 #include <linux/tracepoint-defs.h>
 
@@ -132,6 +134,9 @@
 		void *it_func;						\
 		void *__data;						\
 									\
+		if (!cpu_online(raw_smp_processor_id()))		\
+			return;						\
+									\
 		if (!(cond))						\
 			return;						\
 		prercu;							\
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cbb20af..bb679b4 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -11,4 +11,8 @@
 unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
 int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
 
+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+			   unsigned long maxlength);
+
 #endif /* _LINUX_UCS2_STRING_H_ */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0e32bc7..ca73c50 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -311,6 +311,7 @@
 
 	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
 	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
+	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
@@ -411,12 +412,12 @@
 	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
 #define create_workqueue(name)						\
-	alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
 #define create_freezable_workqueue(name)				\
-	alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
-			1, (name))
+	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
+			WQ_MEM_RECLAIM, 1, (name))
 #define create_singlethread_workqueue(name)				\
-	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ef03ae5..8a0f55b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -533,7 +533,8 @@
 		const unsigned int requested_sizes[]);
 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking);
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+		   bool nonblocking);
 
 int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 2a91a05..9b4c418 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,8 +6,8 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 6db96ea..dda9abf 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -230,6 +230,7 @@
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
 		    u8 *protocol, struct flowi4 *fl4);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
 struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
diff --git a/include/net/scm.h b/include/net/scm.h
index 262532d..59fa93c 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -21,6 +21,7 @@
 struct scm_fp_list {
 	short			count;
 	short			max;
+	struct user_struct	*user;
 	struct file		*fp[SCM_MAX_FD];
 };
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f6f8f03..ae6468f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -447,7 +447,7 @@
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
 				      struct request_sock *req,
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index fdabbb4..f730b91 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -167,6 +167,10 @@
 int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
 			 unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+			      unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+			       int count);
 
 /* main midi functions */
 
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 56cf8e4..28ee5c2 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -94,5 +94,8 @@
 	sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
 bool target_sense_desc_format(struct se_device *dev);
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+				       struct request_queue *q, int block_size);
 
 #endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5d82816..e8c8c08 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -140,6 +140,8 @@
 	SCF_COMPARE_AND_WRITE		= 0x00080000,
 	SCF_COMPARE_AND_WRITE_POST	= 0x00100000,
 	SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+	SCF_ACK_KREF			= 0x00400000,
+	SCF_USE_CPUID			= 0x00800000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -187,6 +189,7 @@
 	TARGET_SCF_BIDI_OP		= 0x01,
 	TARGET_SCF_ACK_KREF		= 0x02,
 	TARGET_SCF_UNKNOWN_SIZE		= 0x04,
+	TARGET_SCF_USE_CPUID	= 0x08,
 };
 
 /* fabric independent task management function values */
@@ -490,8 +493,9 @@
 #define CMD_T_SENT		(1 << 4)
 #define CMD_T_STOP		(1 << 5)
 #define CMD_T_DEV_ACTIVE	(1 << 7)
-#define CMD_T_REQUEST_STOP	(1 << 8)
 #define CMD_T_BUSY		(1 << 9)
+#define CMD_T_TAS		(1 << 10)
+#define CMD_T_FABRIC_STOP	(1 << 11)
 	spinlock_t		t_state_lock;
 	struct kref		cmd_kref;
 	struct completion	t_transport_stop_comp;
@@ -511,9 +515,6 @@
 
 	struct list_head	state_list;
 
-	/* old task stop completion, consider merging with some of the above */
-	struct completion	task_stop_comp;
-
 	/* backend private data */
 	void			*priv;
 
diff --git a/ipc/shm.c b/ipc/shm.c
index ed3027d..331fc1b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@
 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 
 	/*
-	 * We raced in the idr lookup or with shm_destroy().  Either way, the
-	 * ID is busted.
+	 * Callers of shm_lock() must validate the status of the returned ipc
+	 * object pointer (as returned by ipc_lock()), and error out as
+	 * appropriate.
 	 */
-	WARN_ON(IS_ERR(ipcp));
-
+	if (IS_ERR(ipcp))
+		return (void *)ipcp;
 	return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
 
@@ -186,18 +187,33 @@
 }
 
 
-/* This is called by fork, once for every shm attach. */
-static void shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct vm_area_struct *vma)
 {
 	struct file *file = vma->vm_file;
 	struct shm_file_data *sfd = shm_file_data(file);
 	struct shmid_kernel *shp;
 
 	shp = shm_lock(sfd->ns, sfd->id);
+
+	if (IS_ERR(shp))
+		return PTR_ERR(shp);
+
 	shp->shm_atim = get_seconds();
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_nattch++;
 	shm_unlock(shp);
+	return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open(struct vm_area_struct *vma)
+{
+	int err = __shm_open(vma);
+	/*
+	 * We raced in the idr lookup or with shm_destroy().
+	 * Either way, the ID is busted.
+	 */
+	WARN_ON_ONCE(err);
 }
 
 /*
@@ -260,6 +276,14 @@
 	down_write(&shm_ids(ns).rwsem);
 	/* remove from the list of attaches of the shm segment */
 	shp = shm_lock(ns, sfd->id);
+
+	/*
+	 * We raced in the idr lookup or with shm_destroy().
+	 * Either way, the ID is busted.
+	 */
+	if (WARN_ON_ONCE(IS_ERR(shp)))
+		goto done; /* no-op */
+
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_dtim = get_seconds();
 	shp->shm_nattch--;
@@ -267,6 +291,7 @@
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
+done:
 	up_write(&shm_ids(ns).rwsem);
 }
 
@@ -388,17 +413,25 @@
 	struct shm_file_data *sfd = shm_file_data(file);
 	int ret;
 
-	ret = sfd->file->f_op->mmap(sfd->file, vma);
-	if (ret != 0)
+	/*
+	 * In case of remap_file_pages() emulation, the file can represent
+	 * removed IPC ID: propagate shm_lock() error to caller.
+	 */
+	ret = __shm_open(vma);
+	if (ret)
 		return ret;
+
+	ret = sfd->file->f_op->mmap(sfd->file, vma);
+	if (ret) {
+		shm_close(vma);
+		return ret;
+	}
 	sfd->vm_ops = vma->vm_ops;
 #ifdef CONFIG_MMU
 	WARN_ON(!sfd->vm_ops->fault);
 #endif
 	vma->vm_ops = &shm_vm_ops;
-	shm_open(vma);
-
-	return ret;
+	return 0;
 }
 
 static int shm_release(struct inode *ino, struct file *file)
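
The __shm_open()/shm_mmap() changes above depend on the kernel's ERR_PTR convention, where an errno is encoded in the pointer value so a single return value can carry either a valid object or an error. A minimal userspace sketch of that convention (the helper definitions and the lookup() function are simplified stand-ins, not the kernel implementations):

#include <stdio.h>
#include <errno.h>

/* Simplified versions of the kernel's ERR_PTR helpers, for illustration only. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct shmid_like { int id; };

/* A lookup that can fail: returns either a valid object or an encoded errno. */
static struct shmid_like *lookup(int id)
{
	static struct shmid_like obj = { .id = 42 };

	if (id != obj.id)
		return ERR_PTR(-EIDRM);	/* segment was removed */
	return &obj;
}

int main(void)
{
	struct shmid_like *p = lookup(7);

	if (IS_ERR(p))		/* caller must check, as __shm_open() now does */
		printf("lookup failed: %ld\n", PTR_ERR(p));
	else
		printf("found id %d\n", p->id);
	return 0;
}
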
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d1d3e8f..2e7f7ab 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2082,7 +2082,7 @@
 		/* adjust offset of jmps if necessary */
 		if (i < pos && i + insn->off + 1 > pos)
 			insn->off += delta;
-		else if (i > pos && i + insn->off + 1 < pos)
+		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
 			insn->off -= delta;
 	}
 }
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c03a640..d27904c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -58,6 +58,7 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/cpuset.h>
 #include <net/sock.h>
 
 /*
@@ -2739,6 +2740,7 @@
 out_unlock_threadgroup:
 	percpu_up_write(&cgroup_threadgroup_rwsem);
 	cgroup_kn_unlock(of->kn);
+	cpuset_post_attach_flush();
 	return ret ?: nbytes;
 }
 
@@ -4655,14 +4657,15 @@
 
 	if (ss) {
 		/* css free path */
+		struct cgroup_subsys_state *parent = css->parent;
 		int id = css->id;
 
-		if (css->parent)
-			css_put(css->parent);
-
 		ss->css_free(css);
 		cgroup_idr_remove(&ss->css_idr, id);
 		cgroup_put(cgrp);
+
+		if (parent)
+			css_put(parent);
 	} else {
 		/* cgroup free path */
 		atomic_dec(&cgrp->root->nr_cgrps);
@@ -4758,6 +4761,7 @@
 	INIT_LIST_HEAD(&css->sibling);
 	INIT_LIST_HEAD(&css->children);
 	css->serial_nr = css_serial_nr_next++;
+	atomic_set(&css->online_cnt, 0);
 
 	if (cgroup_parent(cgrp)) {
 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4780,6 +4784,10 @@
 	if (!ret) {
 		css->flags |= CSS_ONLINE;
 		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+		atomic_inc(&css->online_cnt);
+		if (css->parent)
+			atomic_inc(&css->parent->online_cnt);
 	}
 	return ret;
 }
@@ -5017,10 +5025,15 @@
 		container_of(work, struct cgroup_subsys_state, destroy_work);
 
 	mutex_lock(&cgroup_mutex);
-	offline_css(css);
-	mutex_unlock(&cgroup_mutex);
 
-	css_put(css);
+	do {
+		offline_css(css);
+		css_put(css);
+		/* @css can't go away while we're holding cgroup_mutex */
+		css = css->parent;
+	} while (css && atomic_dec_and_test(&css->online_cnt));
+
+	mutex_unlock(&cgroup_mutex);
 }
 
 /* css kill confirmation processing requires process context, bounce */
@@ -5029,8 +5042,10 @@
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
-	INIT_WORK(&css->destroy_work, css_killed_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	if (atomic_dec_and_test(&css->online_cnt)) {
+		INIT_WORK(&css->destroy_work, css_killed_work_fn);
+		queue_work(cgroup_destroy_wq, &css->destroy_work);
+	}
 }
 
 /**
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3e945fc..41989ab 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -287,6 +287,8 @@
 static DEFINE_MUTEX(cpuset_mutex);
 static DEFINE_SPINLOCK(callback_lock);
 
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
 /*
  * CPU / memory hotplug is handled asynchronously.
  */
@@ -972,31 +974,51 @@
 }
 
 /*
- * cpuset_migrate_mm
- *
- *    Migrate memory region from one set of nodes to another.
- *
- *    Temporarilly set tasks mems_allowed to target nodes of migration,
- *    so that the migration code can allocate pages on these nodes.
- *
- *    While the mm_struct we are migrating is typically from some
- *    other task, the task_struct mems_allowed that we are hacking
- *    is for our current task, which must allocate new pages for that
- *    migrating memory region.
+ * Migrate memory region from one set of nodes to another.  This is
+ * performed asynchronously as it can be called from the process migration
+ * path holding locks involved in process management.  All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
  */
 
+struct cpuset_migrate_mm_work {
+	struct work_struct	work;
+	struct mm_struct	*mm;
+	nodemask_t		from;
+	nodemask_t		to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+	struct cpuset_migrate_mm_work *mwork =
+		container_of(work, struct cpuset_migrate_mm_work, work);
+
+	/* on a wq worker, no need to worry about %current's mems_allowed */
+	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+	mmput(mwork->mm);
+	kfree(mwork);
+}
+
 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 							const nodemask_t *to)
 {
-	struct task_struct *tsk = current;
+	struct cpuset_migrate_mm_work *mwork;
 
-	tsk->mems_allowed = *to;
+	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+	if (mwork) {
+		mwork->mm = mm;
+		mwork->from = *from;
+		mwork->to = *to;
+		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+		queue_work(cpuset_migrate_mm_wq, &mwork->work);
+	} else {
+		mmput(mm);
+	}
+}
 
-	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
-
-	rcu_read_lock();
-	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
-	rcu_read_unlock();
+void cpuset_post_attach_flush(void)
+{
+	flush_workqueue(cpuset_migrate_mm_wq);
 }
 
 /*
@@ -1097,7 +1119,8 @@
 		mpol_rebind_mm(mm, &cs->mems_allowed);
 		if (migrate)
 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
-		mmput(mm);
+		else
+			mmput(mm);
 	}
 	css_task_iter_end(&it);
 
@@ -1545,11 +1568,11 @@
 			 * @old_mems_allowed is the right nodesets that we
 			 * migrate mm from.
 			 */
-			if (is_memory_migrate(cs)) {
+			if (is_memory_migrate(cs))
 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
 						  &cpuset_attach_nodemask_to);
-			}
-			mmput(mm);
+			else
+				mmput(mm);
 		}
 	}
 
@@ -1714,6 +1737,7 @@
 	mutex_unlock(&cpuset_mutex);
 	kernfs_unbreak_active_protection(of->kn);
 	css_put(&cs->css);
+	flush_workqueue(cpuset_migrate_mm_wq);
 	return retval ?: nbytes;
 }
 
@@ -2359,6 +2383,9 @@
 	top_cpuset.effective_mems = node_states[N_MEMORY];
 
 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+	BUG_ON(!cpuset_migrate_mm_wq);
 }
 
 /**
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5946460..0d58522 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9206,7 +9206,7 @@
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
-	if (swhash->hlist_refcount > 0) {
+	if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
 		struct swevent_hlist *hlist;
 
 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -9282,11 +9282,9 @@
 	switch (action & ~CPU_TASKS_FROZEN) {
 
 	case CPU_UP_PREPARE:
-	case CPU_DOWN_FAILED:
 		perf_event_init_cpu(cpu);
 		break;
 
-	case CPU_UP_CANCELED:
 	case CPU_DOWN_PREPARE:
 		perf_event_exit_cpu(cpu);
 		break;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 60ace56..716547f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -292,7 +292,7 @@
 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
 
-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@
 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 
-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];
 
 /*
  * The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return NULL;
 
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;
 
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
 	}
@@ -805,7 +805,7 @@
 	 * We use RCU's safe list-add method to make
 	 * parallel walking of the hash-list safe:
 	 */
-	list_add_tail_rcu(&class->hash_entry, hash_head);
+	hlist_add_head_rcu(&class->hash_entry, hash_head);
 	/*
 	 * Add it to the global list of classes:
 	 */
@@ -1822,7 +1822,7 @@
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, int trylock_loop)
+	       struct held_lock *next, int distance, int *stack_saved)
 {
 	struct lock_list *entry;
 	int ret;
@@ -1883,8 +1883,11 @@
 		}
 	}
 
-	if (!trylock_loop && !save_trace(&trace))
-		return 0;
+	if (!*stack_saved) {
+		if (!save_trace(&trace))
+			return 0;
+		*stack_saved = 1;
+	}
 
 	/*
 	 * Ok, all validations passed, add the new lock
@@ -1907,6 +1910,8 @@
 	 * Debugging printouts:
 	 */
 	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
+		/* We drop graph lock, so another thread can overwrite trace. */
+		*stack_saved = 0;
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
@@ -1929,7 +1934,7 @@
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
-	int trylock_loop = 0;
+	int stack_saved = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1956,7 +1961,7 @@
 		 */
 		if (hlock->read != 2 && hlock->check) {
 			if (!check_prev_add(curr, hlock, next,
-						distance, trylock_loop))
+						distance, &stack_saved))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1979,7 +1984,6 @@
 		if (curr->held_locks[depth].irq_context !=
 				curr->held_locks[depth-1].irq_context)
 			break;
-		trylock_loop = 1;
 	}
 	return 1;
 out_bug:
@@ -2017,7 +2021,7 @@
 				     u64 chain_key)
 {
 	struct lock_class *class = hlock_class(hlock);
-	struct list_head *hash_head = chainhashentry(chain_key);
+	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr;
 	int i, j;
@@ -2033,7 +2037,7 @@
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry_rcu(chain, hash_head, entry) {
+	hlist_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2061,7 @@
 	/*
 	 * We have to walk the chain again locked - to avoid duplicates:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	hlist_for_each_entry(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 			graph_unlock();
 			goto cache_hit;
@@ -2091,7 +2095,7 @@
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
 	}
-	list_add_tail_rcu(&chain->entry, hash_head);
+	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
 	inc_chains();
 
@@ -3875,7 +3879,7 @@
 	nr_process_chains = 0;
 	debug_locks = 1;
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
 	raw_local_irq_restore(flags);
 }
 
@@ -3894,7 +3898,7 @@
 	/*
 	 * Unhash the class and remove it from the all_lock_classes list:
 	 */
-	list_del_rcu(&class->hash_entry);
+	hlist_del_rcu(&class->hash_entry);
 	list_del_rcu(&class->lock_entry);
 
 	RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3921,7 @@
 void lockdep_free_key_range(void *start, unsigned long size)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i;
 	int locked;
@@ -3930,9 +3934,7 @@
 	 */
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))
@@ -3962,7 +3964,7 @@
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i, j;
 	int locked;
@@ -3987,9 +3989,7 @@
 	locked = graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;
 
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4027,10 @@
 		return;
 
 	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_LIST_HEAD(classhash_table + i);
+		INIT_HLIST_HEAD(classhash_table + i);
 
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
 
 	lockdep_initialized = 1;
 }
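
The lockdep conversion works because an hlist-style bucket head is a single pointer, so the class and chain hash tables need half the memory of two-pointer list_head buckets, and an empty bucket is simply a NULL first pointer. A much-simplified userspace sketch of the idea (the real hlist_node also carries a pprev back-pointer for O(1) unlink, omitted here):

#include <stdio.h>
#include <stddef.h>

struct node {
	int key;
	struct node *next;
};

struct bucket {
	struct node *first;	/* one pointer per bucket */
};

/* Add at the head of the bucket, like hlist_add_head_rcu() does. */
static void bucket_add_head(struct bucket *b, struct node *n)
{
	n->next = b->first;
	b->first = n;
}

int main(void)
{
	struct bucket b = { NULL };
	struct node a1 = { .key = 1 }, a2 = { .key = 2 };
	struct node *n;

	bucket_add_head(&b, &a1);
	bucket_add_head(&b, &a2);

	for (n = b.first; n; n = n->next)	/* cf. hlist_for_each_entry_rcu() */
		printf("key %d\n", n->key);
	return 0;
}
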
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 70ee377..7a1b5c3 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -114,7 +114,7 @@
 
 static void devm_memremap_release(struct device *dev, void *res)
 {
-	memunmap(res);
+	memunmap(*(void **)res);
 }
 
 static int devm_memremap_match(struct device *dev, void *res, void *match_data)
@@ -150,7 +150,7 @@
 }
 EXPORT_SYMBOL(devm_memunmap);
 
-pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
+pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
 {
 	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
 }
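
The memunmap(*(void **)res) fix works because a devres release callback receives a pointer to the devres data area, and that area stores the mapped address by value. A small sketch of the indirection (the names here are made up for illustration; this is not the devres API itself):

#include <stdio.h>
#include <stdlib.h>

/* The release callback gets a pointer to the slot, not the stored value. */
static void release(void *res)
{
	void *addr = *(void **)res;	/* dereference, as the fix above does */
	printf("freeing %p\n", addr);
	free(addr);
}

int main(void)
{
	/* The "devres" slot holds the mapped/allocated address by value. */
	void **slot = malloc(sizeof(*slot));

	*slot = malloc(64);
	release(slot);		/* passing 'slot' itself would free the wrong pointer */
	free(slot);
	return 0;
}
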
diff --git a/kernel/module.c b/kernel/module.c
index 8358f46..794ebe8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -303,6 +303,9 @@
 	struct _ddebug *debug;
 	unsigned int num_debug;
 	bool sig_ok;
+#ifdef CONFIG_KALLSYMS
+	unsigned long mod_kallsyms_init_off;
+#endif
 	struct {
 		unsigned int sym, str, mod, vers, info, pcpu;
 	} index;
@@ -981,6 +984,8 @@
 		mod->exit();
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	ftrace_release_mod(mod);
+
 	async_synchronize_full();
 
 	/* Store the name of the last unloaded module for diagnostic purposes */
@@ -2480,10 +2485,21 @@
 	strsect->sh_flags |= SHF_ALLOC;
 	strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
 					 info->index.str) | INIT_OFFSET_MASK;
-	mod->init_layout.size = debug_align(mod->init_layout.size);
 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+
+	/* We'll tack temporary mod_kallsyms on the end. */
+	mod->init_layout.size = ALIGN(mod->init_layout.size,
+				      __alignof__(struct mod_kallsyms));
+	info->mod_kallsyms_init_off = mod->init_layout.size;
+	mod->init_layout.size += sizeof(struct mod_kallsyms);
+	mod->init_layout.size = debug_align(mod->init_layout.size);
 }
 
+/*
+ * We use the full symtab and strtab which layout_symtab arranged to
+ * be appended to the init section.  Later we switch to the cut-down
+ * core-only ones.
+ */
 static void add_kallsyms(struct module *mod, const struct load_info *info)
 {
 	unsigned int i, ndst;
@@ -2492,29 +2508,34 @@
 	char *s;
 	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
 
-	mod->symtab = (void *)symsec->sh_addr;
-	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+	/* Set up to point into init section. */
+	mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
+
+	mod->kallsyms->symtab = (void *)symsec->sh_addr;
+	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
 	/* Make sure we get permanent strtab: don't use info->strtab. */
-	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
 
 	/* Set types up while we still have access to sections. */
-	for (i = 0; i < mod->num_symtab; i++)
-		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+	for (i = 0; i < mod->kallsyms->num_symtab; i++)
+		mod->kallsyms->symtab[i].st_info
+			= elf_type(&mod->kallsyms->symtab[i], info);
 
-	mod->core_symtab = dst = mod->core_layout.base + info->symoffs;
-	mod->core_strtab = s = mod->core_layout.base + info->stroffs;
-	src = mod->symtab;
-	for (ndst = i = 0; i < mod->num_symtab; i++) {
+	/* Now populate the cut down core kallsyms for after init. */
+	mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
+	mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
+	src = mod->kallsyms->symtab;
+	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
 		if (i == 0 ||
 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
 				   info->index.pcpu)) {
 			dst[ndst] = src[i];
-			dst[ndst++].st_name = s - mod->core_strtab;
-			s += strlcpy(s, &mod->strtab[src[i].st_name],
+			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
 				     KSYM_NAME_LEN) + 1;
 		}
 	}
-	mod->core_num_syms = ndst;
+	mod->core_kallsyms.num_symtab = ndst;
 }
 #else
 static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -3263,9 +3284,8 @@
 	module_put(mod);
 	trim_init_extable(mod);
 #ifdef CONFIG_KALLSYMS
-	mod->num_symtab = mod->core_num_syms;
-	mod->symtab = mod->core_symtab;
-	mod->strtab = mod->core_strtab;
+	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
+	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
 	mod_tree_remove_init(mod);
 	disable_ro_nx(&mod->init_layout);
@@ -3295,6 +3315,7 @@
 	module_put(mod);
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	ftrace_release_mod(mod);
 	free_module(mod);
 	wake_up_all(&module_wq);
 	return ret;
@@ -3371,6 +3392,7 @@
 	mod->state = MODULE_STATE_COMING;
 	mutex_unlock(&module_mutex);
 
+	ftrace_module_enable(mod);
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_COMING, mod);
 	return 0;
@@ -3496,7 +3518,7 @@
 
 	/* Module is ready to execute: parsing args may do that. */
 	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
-				  -32768, 32767, NULL,
+				  -32768, 32767, mod,
 				  unknown_module_param_cb);
 	if (IS_ERR(after_dashes)) {
 		err = PTR_ERR(after_dashes);
@@ -3627,6 +3649,11 @@
 	       && (str[2] == '\0' || str[2] == '.');
 }
 
+static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+{
+	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
+}
+
 static const char *get_ksymbol(struct module *mod,
 			       unsigned long addr,
 			       unsigned long *size,
@@ -3634,6 +3661,7 @@
 {
 	unsigned int i, best = 0;
 	unsigned long nextval;
+	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
 	/* At worse, next value is at end of module */
 	if (within_module_init(addr, mod))
@@ -3643,32 +3671,32 @@
 
 	/* Scan for closest preceding symbol, and next symbol. (ELF
 	   starts real symbols at 1). */
-	for (i = 1; i < mod->num_symtab; i++) {
-		if (mod->symtab[i].st_shndx == SHN_UNDEF)
+	for (i = 1; i < kallsyms->num_symtab; i++) {
+		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
 			continue;
 
 		/* We ignore unnamed symbols: they're uninformative
 		 * and inserted at a whim. */
-		if (mod->symtab[i].st_value <= addr
-		    && mod->symtab[i].st_value > mod->symtab[best].st_value
-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+		if (*symname(kallsyms, i) == '\0'
+		    || is_arm_mapping_symbol(symname(kallsyms, i)))
+			continue;
+
+		if (kallsyms->symtab[i].st_value <= addr
+		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
 			best = i;
-		if (mod->symtab[i].st_value > addr
-		    && mod->symtab[i].st_value < nextval
-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
-			nextval = mod->symtab[i].st_value;
+		if (kallsyms->symtab[i].st_value > addr
+		    && kallsyms->symtab[i].st_value < nextval)
+			nextval = kallsyms->symtab[i].st_value;
 	}
 
 	if (!best)
 		return NULL;
 
 	if (size)
-		*size = nextval - mod->symtab[best].st_value;
+		*size = nextval - kallsyms->symtab[best].st_value;
 	if (offset)
-		*offset = addr - mod->symtab[best].st_value;
-	return mod->strtab + mod->symtab[best].st_name;
+		*offset = addr - kallsyms->symtab[best].st_value;
+	return symname(kallsyms, best);
 }
 
 /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
@@ -3758,19 +3786,21 @@
 
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list) {
+		struct mod_kallsyms *kallsyms;
+
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-		if (symnum < mod->num_symtab) {
-			*value = mod->symtab[symnum].st_value;
-			*type = mod->symtab[symnum].st_info;
-			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
-				KSYM_NAME_LEN);
+		kallsyms = rcu_dereference_sched(mod->kallsyms);
+		if (symnum < kallsyms->num_symtab) {
+			*value = kallsyms->symtab[symnum].st_value;
+			*type = kallsyms->symtab[symnum].st_info;
+			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
 			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
 			*exported = is_exported(name, *value, mod);
 			preempt_enable();
 			return 0;
 		}
-		symnum -= mod->num_symtab;
+		symnum -= kallsyms->num_symtab;
 	}
 	preempt_enable();
 	return -ERANGE;
@@ -3779,11 +3809,12 @@
 static unsigned long mod_find_symname(struct module *mod, const char *name)
 {
 	unsigned int i;
+	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
-	for (i = 0; i < mod->num_symtab; i++)
-		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
-		    mod->symtab[i].st_info != 'U')
-			return mod->symtab[i].st_value;
+	for (i = 0; i < kallsyms->num_symtab; i++)
+		if (strcmp(name, symname(kallsyms, i)) == 0 &&
+		    kallsyms->symtab[i].st_info != 'U')
+			return kallsyms->symtab[i].st_value;
 	return 0;
 }
 
@@ -3822,11 +3853,14 @@
 	module_assert_mutex();
 
 	list_for_each_entry(mod, &modules, list) {
+		/* We hold module_mutex: no need for rcu_dereference_sched */
+		struct mod_kallsyms *kallsyms = mod->kallsyms;
+
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-		for (i = 0; i < mod->num_symtab; i++) {
-			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
-				 mod, mod->symtab[i].st_value);
+		for (i = 0; i < kallsyms->num_symtab; i++) {
+			ret = fn(data, symname(kallsyms, i),
+				 mod, kallsyms->symtab[i].st_value);
 			if (ret != 0)
 				return ret;
 		}
diff --git a/kernel/resource.c b/kernel/resource.c
index 09c0597..3669d1b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1083,9 +1083,10 @@
 		if (!conflict)
 			break;
 		if (conflict != parent) {
-			parent = conflict;
-			if (!(conflict->flags & IORESOURCE_BUSY))
+			if (!(conflict->flags & IORESOURCE_BUSY)) {
+				parent = conflict;
 				continue;
+			}
 		}
 		if (conflict->flags & flags & IORESOURCE_MUXED) {
 			add_wait_queue(&muxed_resource_wait, &wait);
diff --git a/kernel/signal.c b/kernel/signal.c
index f3f1f7a..0508544 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3508,8 +3508,10 @@
 	current->saved_sigmask = current->blocked;
 	set_current_blocked(set);
 
-	__set_current_state(TASK_INTERRUPTIBLE);
-	schedule();
+	while (!signal_pending(current)) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
 	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca592f..57a6eea 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4961,7 +4961,7 @@
 	mutex_unlock(&ftrace_lock);
 }
 
-static void ftrace_module_enable(struct module *mod)
+void ftrace_module_enable(struct module *mod)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
@@ -5038,38 +5038,8 @@
 	ftrace_process_locs(mod, mod->ftrace_callsites,
 			    mod->ftrace_callsites + mod->num_ftrace_callsites);
 }
-
-static int ftrace_module_notify(struct notifier_block *self,
-				unsigned long val, void *data)
-{
-	struct module *mod = data;
-
-	switch (val) {
-	case MODULE_STATE_COMING:
-		ftrace_module_enable(mod);
-		break;
-	case MODULE_STATE_GOING:
-		ftrace_release_mod(mod);
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-#else
-static int ftrace_module_notify(struct notifier_block *self,
-				unsigned long val, void *data)
-{
-	return 0;
-}
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_nb = {
-	.notifier_call = ftrace_module_notify,
-	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
-};
-
 void __init ftrace_init(void)
 {
 	extern unsigned long __start_mcount_loc[];
@@ -5098,10 +5068,6 @@
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_nb);
-	if (ret)
-		pr_warning("Failed to register trace ftrace module exit notifier\n");
-
 	set_ftrace_early_filters();
 
 	return;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index dda9e67..202df6c 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -126,6 +126,13 @@
 	}
 
 	/*
+	 * Some archs may not have the passed in ip in the dump.
+	 * If that happens, we need to show everything.
+	 */
+	if (i == stack_trace_max.nr_entries)
+		i = 0;
+
+	/*
 	 * Now find where in the stack these are.
 	 */
 	x = 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 61a0264..7ff5dc7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -301,7 +301,23 @@
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
 
-static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */
+/* PL: allowable cpus for unbound wqs and work items */
+static cpumask_var_t wq_unbound_cpumask;
+
+/* CPU where unbound work was last round robin scheduled from this CPU */
+static DEFINE_PER_CPU(int, wq_rr_cpu_last);
+
+/*
+ * Local execution of unbound work items is no longer guaranteed.  The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
@@ -570,6 +586,16 @@
 						  int node)
 {
 	assert_rcu_or_wq_mutex_or_pool_mutex(wq);
+
+	/*
+	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
+	 * delayed item is pending.  The plan is to keep CPU -> NODE
+	 * mapping valid and stable across CPU on/offlines.  Once that
+	 * happens, this workaround can be removed.
+	 */
+	if (unlikely(node == NUMA_NO_NODE))
+		return wq->dfl_pwq;
+
 	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 }
 
@@ -1298,6 +1324,39 @@
 	return worker && worker->current_pwq->wq == wq;
 }
 
+/*
+ * When queueing an unbound work item to a wq, prefer local CPU if allowed
+ * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
+ * avoid perturbing sensitive tasks.
+ */
+static int wq_select_unbound_cpu(int cpu)
+{
+	static bool printed_dbg_warning;
+	int new_cpu;
+
+	if (likely(!wq_debug_force_rr_cpu)) {
+		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+			return cpu;
+	} else if (!printed_dbg_warning) {
+		pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+		printed_dbg_warning = true;
+	}
+
+	if (cpumask_empty(wq_unbound_cpumask))
+		return cpu;
+
+	new_cpu = __this_cpu_read(wq_rr_cpu_last);
+	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+	if (unlikely(new_cpu >= nr_cpu_ids)) {
+		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
+		if (unlikely(new_cpu >= nr_cpu_ids))
+			return cpu;
+	}
+	__this_cpu_write(wq_rr_cpu_last, new_cpu);
+
+	return new_cpu;
+}
+
 static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -1323,7 +1382,7 @@
 		return;
 retry:
 	if (req_cpu == WORK_CPU_UNBOUND)
-		cpu = raw_smp_processor_id();
+		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 
 	/* pwq which will be used unless @work is executing elsewhere */
 	if (!(wq->flags & WQ_UNBOUND))
@@ -1464,13 +1523,13 @@
 	timer_stats_timer_set_start_info(&dwork->timer);
 
 	dwork->wq = wq;
-	/* timer isn't guaranteed to run in this cpu, record earlier */
-	if (cpu == WORK_CPU_UNBOUND)
-		cpu = raw_smp_processor_id();
 	dwork->cpu = cpu;
 	timer->expires = jiffies + delay;
 
-	add_timer_on(timer, cpu);
+	if (unlikely(cpu != WORK_CPU_UNBOUND))
+		add_timer_on(timer, cpu);
+	else
+		add_timer(timer);
 }
 
 /**
@@ -2355,7 +2414,8 @@
 	WARN_ONCE(current->flags & PF_MEMALLOC,
 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
 		  current->pid, current->comm, target_wq->name, target_func);
-	WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM),
+	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
+			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
 		  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
 		  worker->current_pwq->wq->name, worker->current_func,
 		  target_wq->name, target_func);
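
wq_select_unbound_cpu() above does a two-step search: try cpumask_next_and() after the last pick, then wrap around with cpumask_first_and(). A rough userspace sketch of the same round-robin-with-wraparound idea over a plain bitmask (the mask value, NCPU and helper name are invented for illustration):

#include <stdio.h>

#define NCPU 8

/* Hypothetical stand-ins for wq_unbound_cpumask and the per-CPU last value. */
static unsigned int allowed_mask = 0xAA;	/* CPUs 1, 3, 5, 7 allowed */
static int last = -1;

/* Pick the next allowed CPU after 'last', wrapping around. */
static int next_allowed_cpu(void)
{
	int cpu;

	for (cpu = last + 1; cpu < NCPU; cpu++)
		if (allowed_mask & (1u << cpu))
			return last = cpu;
	for (cpu = 0; cpu < NCPU; cpu++)	/* wrap around */
		if (allowed_mask & (1u << cpu))
			return last = cpu;
	return -1;				/* mask empty */
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("queue on CPU %d\n", next_allowed_cpu());
	return 0;
}
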
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ecb9e75..8bfd1ac 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1400,6 +1400,21 @@
 
 endmenu # "RCU Debugging"
 
+config DEBUG_WQ_FORCE_RR_CPU
+	bool "Force round-robin CPU selection for unbound work items"
+	depends on DEBUG_KERNEL
+	default n
+	help
+	  Workqueues used to implicitly guarantee that work items queued
+	  without an explicit CPU are put on the local CPU.  This guarantee
+	  no longer holds and, while the local CPU is still preferred, work
+	  items may be put on foreign CPUs.  The kernel parameter
+	  "workqueue.debug_force_rr_cpu" forces round-robin CPU selection to
+	  flush out usages which depend on the now-broken guarantee.  This
+	  config option enables the debug feature by default.  When enabled,
+	  memory and cache locality will be impacted.
+
 config DEBUG_BLOCK_EXT_DEVT
         bool "Force extended block device numbers and spread them"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 49518fb..e07c1ba 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -18,6 +18,8 @@
 	  This option activates instrumentation for the entire kernel.
 	  If you don't enable this option, you have to explicitly specify
 	  UBSAN_SANITIZE := y for the files/directories you want to check for UB.
+	  Enabling this option will increase the kernel image size
+	  significantly.
 
 config UBSAN_ALIGNMENT
 	bool "Enable checking of pointers alignment"
@@ -25,5 +27,5 @@
 	default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
 	help
 	  This option enables detection of unaligned memory accesses.
-	  Enabling this option on architectures that support unalligned
+	  Enabling this option on architectures that support unaligned
 	  accesses may produce a lot of false positives.
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6745c62..c30d07e 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -25,6 +25,7 @@
 
 asmlinkage __visible void dump_stack(void)
 {
+	unsigned long flags;
 	int was_locked;
 	int old;
 	int cpu;
@@ -33,9 +34,8 @@
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-	preempt_disable();
-
 retry:
+	local_irq_save(flags);
 	cpu = smp_processor_id();
 	old = atomic_cmpxchg(&dump_lock, -1, cpu);
 	if (old == -1) {
@@ -43,6 +43,7 @@
 	} else if (old == cpu) {
 		was_locked = 1;
 	} else {
+		local_irq_restore(flags);
 		cpu_relax();
 		goto retry;
 	}
@@ -52,7 +53,7 @@
 	if (!was_locked)
 		atomic_set(&dump_lock, -1);
 
-	preempt_enable();
+	local_irq_restore(flags);
 }
 #else
 asmlinkage __visible void dump_stack(void)
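
The dump_stack() serialisation relies on a cmpxchg-based lock that tolerates recursion by the current owner. A simplified C11-atomics sketch of that scheme, using a caller-supplied id in place of smp_processor_id() and busy-waiting instead of the IRQ save/restore the kernel does:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_lock = -1;

/* Serialise dumps across "CPUs" while allowing the owner to nest. */
static int lock_dump(int me)
{
	int expected;

	for (;;) {
		expected = -1;
		if (atomic_compare_exchange_strong(&dump_lock, &expected, me))
			return 0;		/* acquired, not previously locked */
		if (expected == me)
			return 1;		/* nested on the same owner */
		/* someone else holds it: retry (the kernel restores IRQs here) */
	}
}

static void unlock_dump(int was_locked)
{
	if (!was_locked)
		atomic_store(&dump_lock, -1);
}

int main(void)
{
	int nested = lock_dump(0);

	printf("nested=%d\n", nested);
	unlock_dump(nested);
	return 0;
}
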
diff --git a/lib/klist.c b/lib/klist.c
index d74cf7a..0507fa5 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -282,9 +282,9 @@
 			  struct klist_node *n)
 {
 	i->i_klist = k;
-	i->i_cur = n;
-	if (n)
-		kref_get(&n->n_ref);
+	i->i_cur = NULL;
+	if (n && kref_get_unless_zero(&n->n_ref))
+		i->i_cur = n;
 }
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
 
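klist_iter_init_node() now only records the node if it can still take a reference, i.e. the kref_get_unless_zero() pattern: grab a reference only while the count is non-zero, otherwise treat the object as already dying. A standalone sketch of that pattern with C11 atomics (not the kernel kref implementation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 1;

/* Take a reference only if the object is not already on its way to being freed. */
static int get_unless_zero(atomic_int *r)
{
	int old = atomic_load(r);

	while (old != 0) {
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return 1;	/* got a reference */
	}
	return 0;			/* object already dying: don't touch it */
}

int main(void)
{
	printf("got=%d count=%d\n", get_unless_zero(&refcount),
	       atomic_load(&refcount));

	atomic_store(&refcount, 0);	/* last reference dropped elsewhere */
	printf("got=%d\n", get_unless_zero(&refcount));
	return 0;
}
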
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index fcf5d98..6b79e90 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1019,9 +1019,13 @@
 		return 0;
 
 	radix_tree_for_each_slot(slot, root, &iter, first_index) {
-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+		results[ret] = rcu_dereference_raw(*slot);
 		if (!results[ret])
 			continue;
+		if (radix_tree_is_indirect_ptr(results[ret])) {
+			slot = radix_tree_iter_retry(&iter);
+			continue;
+		}
 		if (++ret == max_items)
 			break;
 	}
@@ -1098,9 +1102,13 @@
 		return 0;
 
 	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+		results[ret] = rcu_dereference_raw(*slot);
 		if (!results[ret])
 			continue;
+		if (radix_tree_is_indirect_ptr(results[ret])) {
+			slot = radix_tree_iter_retry(&iter);
+			continue;
+		}
 		if (++ret == max_items)
 			break;
 	}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index bafa993..004fc70 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -598,9 +598,9 @@
  *
  * Description:
  *   Stops mapping iterator @miter.  @miter should have been started
- *   started using sg_miter_start().  A stopped iteration can be
- *   resumed by calling sg_miter_next() on it.  This is useful when
- *   resources (kmap) need to be released during iteration.
+ *   using sg_miter_start().  A stopped iteration can be resumed by
+ *   calling sg_miter_next() on it.  This is useful when resources (kmap)
+ *   need to be released during iteration.
  *
  * Context:
  *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 98866a7..25b5cbf 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -327,36 +327,67 @@
 }
 
 #define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, units, exp_result)            \
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2)    \
 	do {                                                                   \
-		BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf);    \
-		__test_string_get_size((size), (blk_size), (units),            \
-				       (exp_result));                          \
+		BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf);  \
+		BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf);   \
+		__test_string_get_size((size), (blk_size), (exp_result10),     \
+				       (exp_result2));                         \
 	} while (0)
 
 
-static __init void __test_string_get_size(const u64 size, const u64 blk_size,
-					  const enum string_size_units units,
-					  const char *exp_result)
+static __init void test_string_get_size_check(const char *units,
+					      const char *exp,
+					      char *res,
+					      const u64 size,
+					      const u64 blk_size)
 {
-	char buf[string_get_size_maxbuf];
-
-	string_get_size(size, blk_size, units, buf, sizeof(buf));
-	if (!memcmp(buf, exp_result, strlen(exp_result) + 1))
+	if (!memcmp(res, exp, strlen(exp) + 1))
 		return;
 
-	buf[sizeof(buf) - 1] = '\0';
-	pr_warn("Test 'test_string_get_size_one' failed!\n");
-	pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n",
+	res[string_get_size_maxbuf - 1] = '\0';
+
+	pr_warn("Test 'test_string_get_size' failed!\n");
+	pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
 		size, blk_size, units);
-	pr_warn("expected: '%s', got '%s'\n", exp_result, buf);
+	pr_warn("expected: '%s', got '%s'\n", exp, res);
+}
+
+static __init void __test_string_get_size(const u64 size, const u64 blk_size,
+					  const char *exp_result10,
+					  const char *exp_result2)
+{
+	char buf10[string_get_size_maxbuf];
+	char buf2[string_get_size_maxbuf];
+
+	string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
+	string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+
+	test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
+				   size, blk_size);
+
+	test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
+				   size, blk_size);
 }
 
 static __init void test_string_get_size(void)
 {
-	test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB");
-	test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB");
-	test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B");
+	/* small values */
+	test_string_get_size_one(0, 512, "0 B", "0 B");
+	test_string_get_size_one(1, 512, "512 B", "512 B");
+	test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
+
+	/* normal values */
+	test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
+	test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
+	test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
+
+	/* weird block sizes */
+	test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
+
+	/* huge values */
+	test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
+	test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
 }
 
 static int __init test_string_helpers_init(void)
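
The extended tests exercise both decimal (SI) and binary (IEC) scaling. Below is a rough userspace approximation of the two unit systems; string_get_size() itself uses exact integer arithmetic and three-significant-digit rounding, so its output can differ slightly from this floating-point sketch:

#include <stdio.h>
#include <stdint.h>

static void print_size(uint64_t size, uint64_t blk_size)
{
	static const char *units10[] = { "B", "kB", "MB", "GB", "TB" };
	static const char *units2[]  = { "B", "KiB", "MiB", "GiB", "TiB" };
	double d10 = (double)size * blk_size, d2 = d10;
	int i10 = 0, i2 = 0;

	while (d10 >= 1000 && i10 < 4) { d10 /= 1000; i10++; }
	while (d2 >= 1024 && i2 < 4)  { d2 /= 1024; i2++; }

	printf("%.2f %s / %.2f %s\n", d10, units10[i10], d2, units2[i2]);
}

int main(void)
{
	print_size(16384, 512);	/* roughly "8.39 MB / 8.00 MiB" as in the test */
	return 0;
}
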
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 6f500ef..f0b323a 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -49,3 +49,65 @@
         }
 }
 EXPORT_SYMBOL(ucs2_strncmp);
+
+unsigned long
+ucs2_utf8size(const ucs2_char_t *src)
+{
+	unsigned long i;
+	unsigned long j = 0;
+
+	for (i = 0; i < ucs2_strlen(src); i++) {
+		u16 c = src[i];
+
+		if (c >= 0x800)
+			j += 3;
+		else if (c >= 0x80)
+			j += 2;
+		else
+			j += 1;
+	}
+
+	return j;
+}
+EXPORT_SYMBOL(ucs2_utf8size);
+
+/*
+ * copy at most maxlength bytes of whole utf8 characters to dest from the
+ * ucs2 string src.
+ *
+ * The return value is the number of bytes copied, not including the
+ * final NUL character.
+ */
+unsigned long
+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
+{
+	unsigned int i;
+	unsigned long j = 0;
+	unsigned long limit = ucs2_strnlen(src, maxlength);
+
+	for (i = 0; maxlength && i < limit; i++) {
+		u16 c = src[i];
+
+		if (c >= 0x800) {
+			if (maxlength < 3)
+				break;
+			maxlength -= 3;
+			dest[j++] = 0xe0 | (c & 0xf000) >> 12;
+			dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
+			dest[j++] = 0x80 | (c & 0x003f);
+		} else if (c >= 0x80) {
+			if (maxlength < 2)
+				break;
+			maxlength -= 2;
+			dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
+			dest[j++] = 0x80 | (c & 0x03f);
+		} else {
+			maxlength -= 1;
+			dest[j++] = c & 0x7f;
+		}
+	}
+	if (maxlength)
+		dest[j] = '\0';
+	return j;
+}
+EXPORT_SYMBOL(ucs2_as_utf8);
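
For reference, the width classes used by ucs2_utf8size()/ucs2_as_utf8() are the standard 1/2/3-byte UTF-8 encodings for code points below 0x80, 0x800 and 0x10000. A standalone sketch of the per-character encoding (surrogate pairs are out of scope here, as in the kernel helpers):

#include <stdio.h>
#include <stdint.h>

/* How many UTF-8 bytes a single UCS-2 code point needs. */
static unsigned int utf8_len(uint16_t c)
{
	if (c >= 0x800)
		return 3;
	if (c >= 0x80)
		return 2;
	return 1;
}

/* Encode one UCS-2 code point; returns bytes written (buf needs >= 3 bytes). */
static unsigned int utf8_encode(uint16_t c, uint8_t *buf)
{
	if (c >= 0x800) {
		buf[0] = 0xe0 | (c >> 12);
		buf[1] = 0x80 | ((c >> 6) & 0x3f);
		buf[2] = 0x80 | (c & 0x3f);
		return 3;
	}
	if (c >= 0x80) {
		buf[0] = 0xc0 | (c >> 6);
		buf[1] = 0x80 | (c & 0x3f);
		return 2;
	}
	buf[0] = (uint8_t)c;
	return 1;
}

int main(void)
{
	uint8_t buf[4] = { 0 };
	unsigned int n = utf8_encode(0x20AC, buf);	/* EURO SIGN -> e2 82 ac */

	printf("len=%u bytes=%02x %02x %02x\n", utf8_len(0x20AC),
	       buf[0], buf[1], buf[2]);
	return n == 3 ? 0 : 1;
}
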
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 48ff9c3..f44e178 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1590,22 +1590,23 @@
 			return buf;
 		}
 	case 'K':
-		/*
-		 * %pK cannot be used in IRQ context because its test
-		 * for CAP_SYSLOG would be meaningless.
-		 */
-		if (kptr_restrict && (in_irq() || in_serving_softirq() ||
-				      in_nmi())) {
-			if (spec.field_width == -1)
-				spec.field_width = default_width;
-			return string(buf, end, "pK-error", spec);
-		}
-
 		switch (kptr_restrict) {
 		case 0:
 			/* Always print %pK values */
 			break;
 		case 1: {
+			const struct cred *cred;
+
+			/*
+			 * kptr_restrict==1 cannot be used in IRQ context
+			 * because its test for CAP_SYSLOG would be meaningless.
+			 */
+			if (in_irq() || in_serving_softirq() || in_nmi()) {
+				if (spec.field_width == -1)
+					spec.field_width = default_width;
+				return string(buf, end, "pK-error", spec);
+			}
+
 			/*
 			 * Only print the real pointer value if the current
 			 * process has CAP_SYSLOG and is running with the
@@ -1615,8 +1616,7 @@
 			 * leak pointer values if a binary opens a file using
 			 * %pK and then elevates privileges before reading it.
 			 */
-			const struct cred *cred = current_cred();
-
+			cred = current_cred();
 			if (!has_capability_noaudit(current, CAP_SYSLOG) ||
 			    !uid_eq(cred->euid, cred->uid) ||
 			    !gid_eq(cred->egid, cred->gid))
diff --git a/mm/Kconfig b/mm/Kconfig
index 97a4e06..03cbfa0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -624,7 +624,7 @@
 	bool
 
 config DEFERRED_STRUCT_PAGE_INIT
-	bool "Defer initialisation of struct pages to kswapd"
+	bool "Defer initialisation of struct pages to kthreads"
 	default n
 	depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	depends on MEMORY_HOTPLUG
@@ -633,9 +633,10 @@
 	  single thread. On very large machines this can take a considerable
 	  amount of time. If this option is set, large machines will bring up
 	  a subset of memmap at boot and then initialise the rest in parallel
-	  when kswapd starts. This has a potential performance impact on
-	  processes running early in the lifetime of the systemm until kswapd
-	  finishes the initialisation.
+	  by starting a one-off "pgdatinitX" kernel thread for each node X. This
+	  has a potential performance impact on processes running early in the
+	  lifetime of the system until these kthreads finish the
+	  initialisation.
 
 config IDLE_PAGE_TRACKING
 	bool "Enable idle page tracking"
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index cc5d29d..c554d17 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -328,7 +328,7 @@
 	return 0;
 
 out_destroy_stat:
-	while (--i)
+	while (i--)
 		percpu_counter_destroy(&wb->stat[i]);
 	fprop_local_destroy_percpu(&wb->completions);
 out_put_cong:
@@ -989,7 +989,7 @@
 		 * here rather than calling cond_resched().
 		 */
 		if (current->flags & PF_WQ_WORKER)
-			schedule_timeout(1);
+			schedule_timeout_uninterruptible(1);
 		else
 			cond_resched();
 
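The wb_init() error-path change above is the classic countdown-cleanup fix: with i holding the index that failed, "while (i--)" unwinds indices i-1 down to 0, whereas "while (--i)" skips index 0 (and with i == 0 it would underflow). A tiny illustration:

#include <stdio.h>

int main(void)
{
	int i;

	/* Suppose initialisation failed at i == 2: items 0 and 1 were set up. */
	i = 2;

	/* Buggy form "while (--i)" would only destroy item 1 and skip item 0. */
	/* Fixed form, as in the patch: visits 1, then 0. */
	while (i--)
		printf("destroy item %d\n", i);

	return 0;
}
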
diff --git a/mm/filemap.c b/mm/filemap.c
index bc94386..23edcce 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1890,6 +1890,7 @@
  * page_cache_read - adds requested page to the page cache if not already there
  * @file:	file to read
  * @offset:	page index
+ * @gfp_mask:	memory allocation flags
  *
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
diff --git a/mm/gup.c b/mm/gup.c
index b64a361..7bf19ff 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -430,10 +430,8 @@
 			 * Anon pages in shared mappings are surprising: now
 			 * just reject it.
 			 */
-			if (!is_cow_mapping(vm_flags)) {
-				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
+			if (!is_cow_mapping(vm_flags))
 				return -EFAULT;
-			}
 		}
 	} else if (!(vm_flags & VM_READ)) {
 		if (!(gup_flags & FOLL_FORCE))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fd3a07b..1c317b8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -138,9 +138,6 @@
 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
-static DEFINE_SPINLOCK(split_queue_lock);
-static LIST_HEAD(split_queue);
-static unsigned long split_queue_len;
 static struct shrinker deferred_split_shrinker;
 
 static void set_recommended_min_free_kbytes(void)
@@ -861,7 +858,8 @@
 		return false;
 	entry = mk_pmd(zero_page, vma->vm_page_prot);
 	entry = pmd_mkhuge(entry);
-	pgtable_trans_huge_deposit(mm, pmd, pgtable);
+	if (pgtable)
+		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
 	atomic_long_inc(&mm->nr_ptes);
 	return true;
@@ -1039,13 +1037,15 @@
 	spinlock_t *dst_ptl, *src_ptl;
 	struct page *src_page;
 	pmd_t pmd;
-	pgtable_t pgtable;
+	pgtable_t pgtable = NULL;
 	int ret;
 
-	ret = -ENOMEM;
-	pgtable = pte_alloc_one(dst_mm, addr);
-	if (unlikely(!pgtable))
-		goto out;
+	if (!vma_is_dax(vma)) {
+		ret = -ENOMEM;
+		pgtable = pte_alloc_one(dst_mm, addr);
+		if (unlikely(!pgtable))
+			goto out;
+	}
 
 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
 	src_ptl = pmd_lockptr(src_mm, src_pmd);
@@ -1076,7 +1076,7 @@
 		goto out_unlock;
 	}
 
-	if (pmd_trans_huge(pmd)) {
+	if (!vma_is_dax(vma)) {
 		/* thp accounting separate from pmd_devmap accounting */
 		src_page = pmd_page(pmd);
 		VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
@@ -1700,7 +1700,8 @@
 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
 		VM_BUG_ON(!pmd_none(*new_pmd));
 
-		if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
+		if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
+				vma_is_anonymous(vma)) {
 			pgtable_t pgtable;
 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
@@ -2860,6 +2861,7 @@
 	young = pmd_young(*pmd);
 	dirty = pmd_dirty(*pmd);
 
+	pmdp_huge_split_prepare(vma, haddr, pmd);
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 	pmd_populate(mm, &_pmd, pgtable);
 
@@ -3358,6 +3360,7 @@
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct page *head = compound_head(page);
+	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
 	struct anon_vma *anon_vma;
 	int count, mapcount, ret;
 	bool mlocked;
@@ -3401,19 +3404,19 @@
 		lru_add_drain();
 
 	/* Prevent deferred_split_scan() touching ->_count */
-	spin_lock_irqsave(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
 	count = page_count(head);
 	mapcount = total_mapcount(head);
 	if (!mapcount && count == 1) {
 		if (!list_empty(page_deferred_list(head))) {
-			split_queue_len--;
+			pgdata->split_queue_len--;
 			list_del(page_deferred_list(head));
 		}
-		spin_unlock_irqrestore(&split_queue_lock, flags);
+		spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 		__split_huge_page(page, list);
 		ret = 0;
 	} else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-		spin_unlock_irqrestore(&split_queue_lock, flags);
+		spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 		pr_alert("total_mapcount: %u, page_count(): %u\n",
 				mapcount, count);
 		if (PageTail(page))
@@ -3421,7 +3424,7 @@
 		dump_page(page, "total_mapcount(head) > 0");
 		BUG();
 	} else {
-		spin_unlock_irqrestore(&split_queue_lock, flags);
+		spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 		unfreeze_page(anon_vma, head);
 		ret = -EBUSY;
 	}
@@ -3436,64 +3439,65 @@
 
 void free_transhuge_page(struct page *page)
 {
+	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
 	unsigned long flags;
 
-	spin_lock_irqsave(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
 	if (!list_empty(page_deferred_list(page))) {
-		split_queue_len--;
+		pgdata->split_queue_len--;
 		list_del(page_deferred_list(page));
 	}
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 	free_compound_page(page);
 }
 
 void deferred_split_huge_page(struct page *page)
 {
+	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
 	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 
-	spin_lock_irqsave(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
 	if (list_empty(page_deferred_list(page))) {
-		list_add_tail(page_deferred_list(page), &split_queue);
-		split_queue_len++;
+		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
+		pgdata->split_queue_len++;
 	}
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
 		struct shrink_control *sc)
 {
-	/*
-	 * Split a page from split_queue will free up at least one page,
-	 * at most HPAGE_PMD_NR - 1. We don't track exact number.
-	 * Let's use HPAGE_PMD_NR / 2 as ballpark.
-	 */
-	return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
+	struct pglist_data *pgdata = NODE_DATA(sc->nid);
+	return ACCESS_ONCE(pgdata->split_queue_len);
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
 		struct shrink_control *sc)
 {
+	struct pglist_data *pgdata = NODE_DATA(sc->nid);
 	unsigned long flags;
 	LIST_HEAD(list), *pos, *next;
 	struct page *page;
 	int split = 0;
 
-	spin_lock_irqsave(&split_queue_lock, flags);
-	list_splice_init(&split_queue, &list);
-
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
 	/* Take pin on all head pages to avoid freeing them under us */
-	list_for_each_safe(pos, next, &list) {
+	list_for_each_safe(pos, next, &pgdata->split_queue) {
 		page = list_entry((void *)pos, struct page, mapping);
 		page = compound_head(page);
-		/* race with put_compound_page() */
-		if (!get_page_unless_zero(page)) {
+		if (get_page_unless_zero(page)) {
+			list_move(page_deferred_list(page), &list);
+		} else {
+			/* We lost race with put_compound_page() */
 			list_del_init(page_deferred_list(page));
-			split_queue_len--;
+			pgdata->split_queue_len--;
 		}
+		if (!--sc->nr_to_scan)
+			break;
 	}
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
 	list_for_each_safe(pos, next, &list) {
 		page = list_entry((void *)pos, struct page, mapping);
@@ -3505,17 +3509,24 @@
 		put_page(page);
 	}
 
-	spin_lock_irqsave(&split_queue_lock, flags);
-	list_splice_tail(&list, &split_queue);
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+	list_splice_tail(&list, &pgdata->split_queue);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
-	return split * HPAGE_PMD_NR / 2;
+	/*
+	 * Stop shrinker if we didn't split any page, but the queue is empty.
+	 * This can happen if pages were freed under us.
+	 */
+	if (!split && list_empty(&pgdata->split_queue))
+		return SHRINK_STOP;
+	return split;
 }
 
 static struct shrinker deferred_split_shrinker = {
 	.count_objects = deferred_split_count,
 	.scan_objects = deferred_split_scan,
 	.seeks = DEFAULT_SEEKS,
+	.flags = SHRINKER_NUMA_AWARE,
 };
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 12908dc..01f2b48 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1001,7 +1001,7 @@
 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
 		nr_nodes--)
 
-#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
 static void destroy_compound_gigantic_page(struct page *page,
 					unsigned int order)
 {
@@ -1214,8 +1214,8 @@
 
 	set_page_private(page, 0);
 	page->mapping = NULL;
-	BUG_ON(page_count(page));
-	BUG_ON(page_mapcount(page));
+	VM_BUG_ON_PAGE(page_count(page), page);
+	VM_BUG_ON_PAGE(page_mapcount(page), page);
 	restore_reserve = PagePrivate(page);
 	ClearPagePrivate(page);
 
@@ -1286,6 +1286,7 @@
 		set_page_count(p, 0);
 		set_compound_head(p, page);
 	}
+	atomic_set(compound_mapcount_ptr(page), -1);
 }
 
 /*
@@ -2629,8 +2630,10 @@
 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
 	}
 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
-	if (default_hstate_max_huge_pages)
-		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+	if (default_hstate_max_huge_pages) {
+		if (!default_hstate.max_huge_pages)
+			default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+	}
 
 	hugetlb_init_hstates();
 	gather_bootmem_prealloc();
diff --git a/mm/internal.h b/mm/internal.h
index ed8b5ff..a38a21e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -216,6 +216,37 @@
 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+/*
+ * These three helpers classify VMAs for virtual memory accounting.
+ */
+
+/*
+ * Executable code area - executable, not writable, not stack
+ */
+static inline bool is_exec_mapping(vm_flags_t flags)
+{
+	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
+}
+
+/*
+ * Stack area - automatically grows in one direction
+ *
+ * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
+ * do_mmap() forbids all other combinations.
+ */
+static inline bool is_stack_mapping(vm_flags_t flags)
+{
+	return (flags & VM_STACK) == VM_STACK;
+}
+
+/*
+ * Data area - private, writable, not stack
+ */
+static inline bool is_data_mapping(vm_flags_t flags)
+{
+	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
+}
+
 /* mm/util.c */
 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent);
diff --git a/mm/memblock.c b/mm/memblock.c
index d2ed81e..dd79899 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1448,7 +1448,7 @@
  * Remaining API functions
  */
 
-phys_addr_t __init memblock_phys_mem_size(void)
+phys_addr_t __init_memblock memblock_phys_mem_size(void)
 {
 	return memblock.memory.total_size;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 93ce379..635451a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2237,11 +2237,6 @@
 
 	page_cache_get(old_page);
 
-	/*
-	 * Only catch write-faults on shared writable pages,
-	 * read-only shared pages can get COWed by
-	 * get_user_pages(.write=1, .force=1).
-	 */
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 27d1354..4c4187c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -548,8 +548,7 @@
 			goto retry;
 		}
 
-		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-			migrate_page_add(page, qp->pagelist, flags);
+		migrate_page_add(page, qp->pagelist, flags);
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
@@ -625,7 +624,7 @@
 	unsigned long endvma = vma->vm_end;
 	unsigned long flags = qp->flags;
 
-	if (vma->vm_flags & VM_PFNMAP)
+	if (!vma_migratable(vma))
 		return 1;
 
 	if (endvma > end)
@@ -644,16 +643,13 @@
 
 	if (flags & MPOL_MF_LAZY) {
 		/* Similar to task_numa_work, skip inaccessible VMAs */
-		if (vma_migratable(vma) &&
-			vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
+		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
 			change_prot_numa(vma, start, endvma);
 		return 1;
 	}
 
-	if ((flags & MPOL_MF_STRICT) ||
-	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
-	     vma_migratable(vma)))
-		/* queue pages from current vma */
+	/* queue pages from current vma */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 		return 0;
 	return 1;
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 84b1262..76d1ec2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -42,6 +42,7 @@
 #include <linux/memory.h>
 #include <linux/printk.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/moduleparam.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -69,6 +70,8 @@
 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
 #endif
 
+static bool ignore_rlimit_data = true;
+core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
 
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -387,8 +390,9 @@
 }
 
 #ifdef CONFIG_DEBUG_VM_RB
-static int browse_rb(struct rb_root *root)
+static int browse_rb(struct mm_struct *mm)
 {
+	struct rb_root *root = &mm->mm_rb;
 	int i = 0, j, bug = 0;
 	struct rb_node *nd, *pn = NULL;
 	unsigned long prev = 0, pend = 0;
@@ -411,12 +415,14 @@
 				  vma->vm_start, vma->vm_end);
 			bug = 1;
 		}
+		spin_lock(&mm->page_table_lock);
 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
 			pr_emerg("free gap %lx, correct %lx\n",
 			       vma->rb_subtree_gap,
 			       vma_compute_subtree_gap(vma));
 			bug = 1;
 		}
+		spin_unlock(&mm->page_table_lock);
 		i++;
 		pn = nd;
 		prev = vma->vm_start;
@@ -453,12 +459,16 @@
 	struct vm_area_struct *vma = mm->mmap;
 
 	while (vma) {
+		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
 
-		vma_lock_anon_vma(vma);
-		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-			anon_vma_interval_tree_verify(avc);
-		vma_unlock_anon_vma(vma);
+		if (anon_vma) {
+			anon_vma_lock_read(anon_vma);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				anon_vma_interval_tree_verify(avc);
+			anon_vma_unlock_read(anon_vma);
+		}
+
 		highest_address = vma->vm_end;
 		vma = vma->vm_next;
 		i++;
@@ -472,7 +482,7 @@
 			  mm->highest_vm_end, highest_address);
 		bug = 1;
 	}
-	i = browse_rb(&mm->mm_rb);
+	i = browse_rb(mm);
 	if (i != mm->map_count) {
 		if (i != -1)
 			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
@@ -2139,32 +2149,27 @@
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int error;
+	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
+	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
-	error = 0;
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
@@ -2182,7 +2187,7 @@
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2205,7 +2210,7 @@
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
@@ -2221,25 +2226,21 @@
 	struct mm_struct *mm = vma->vm_mm;
 	int error;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
-	if (unlikely(anon_vma_prepare(vma)))
-		return -ENOMEM;
-
 	address &= PAGE_MASK;
 	error = security_mmap_addr(address);
 	if (error)
 		return error;
 
-	vma_lock_anon_vma(vma);
+	/* We must make sure the anon_vma is allocated. */
+	if (unlikely(anon_vma_prepare(vma)))
+		return -ENOMEM;
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -2257,7 +2258,7 @@
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2278,7 +2279,7 @@
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
@@ -2663,12 +2664,29 @@
 	if (!vma || !(vma->vm_flags & VM_SHARED))
 		goto out;
 
-	if (start < vma->vm_start || start + size > vma->vm_end)
+	if (start < vma->vm_start)
 		goto out;
 
-	if (pgoff == linear_page_index(vma, start)) {
-		ret = 0;
-		goto out;
+	if (start + size > vma->vm_end) {
+		struct vm_area_struct *next;
+
+		for (next = vma->vm_next; next; next = next->vm_next) {
+			/* hole between vmas ? */
+			if (next->vm_start != next->vm_prev->vm_end)
+				goto out;
+
+			if (next->vm_file != vma->vm_file)
+				goto out;
+
+			if (next->vm_flags != vma->vm_flags)
+				goto out;
+
+			if (start + size <= next->vm_end)
+				break;
+		}
+
+		if (!next)
+			goto out;
 	}
 
 	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2678,9 +2696,16 @@
 	flags &= MAP_NONBLOCK;
 	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
 	if (vma->vm_flags & VM_LOCKED) {
+		struct vm_area_struct *tmp;
 		flags |= MAP_LOCKED;
+
 		/* drop PG_Mlocked flag for over-mapped range */
-		munlock_vma_pages_range(vma, start, start + size);
+		for (tmp = vma; tmp && tmp->vm_start < start + size;
+				tmp = tmp->vm_next) {
+			munlock_vma_pages_range(tmp,
+					max(tmp->vm_start, start),
+					min(tmp->vm_end, start + size));
+		}
 	}
 
 	file = get_file(vma->vm_file);
@@ -2982,9 +3007,17 @@
 	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
 		return false;
 
-	if ((flags & (VM_WRITE | VM_SHARED | (VM_STACK_FLAGS &
-				(VM_GROWSUP | VM_GROWSDOWN)))) == VM_WRITE)
-		return mm->data_vm + npages <= rlimit(RLIMIT_DATA);
+	if (is_data_mapping(flags) &&
+	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
+		if (ignore_rlimit_data)
+			pr_warn_once("%s (%d): VmData %lu exceeds data ulimit "
+				     "%lu. Will be forbidden soon.\n",
+				     current->comm, current->pid,
+				     (mm->data_vm + npages) << PAGE_SHIFT,
+				     rlimit(RLIMIT_DATA));
+		else
+			return false;
+	}
 
 	return true;
 }
@@ -2993,11 +3026,11 @@
 {
 	mm->total_vm += npages;
 
-	if ((flags & (VM_EXEC | VM_WRITE)) == VM_EXEC)
+	if (is_exec_mapping(flags))
 		mm->exec_vm += npages;
-	else if (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN)))
+	else if (is_stack_mapping(flags))
 		mm->stack_vm += npages;
-	else if ((flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+	else if (is_data_mapping(flags))
 		mm->data_vm += npages;
 }
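
A side note on the accounting change above: with is_data_mapping() in place, private writable non-stack mappings are what now counts against RLIMIT_DATA, and ignore_rlimit_data (default true) only warns instead of failing the mapping. Below is a minimal userspace sketch of the intended behaviour once ignore_rlimit_data is set to 0; the program and its 1 MiB / 4 MiB sizes are illustrative only, not part of the patch.

	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
		void *p;

		/* Cap RLIMIT_DATA at 1 MiB for this process. */
		if (setrlimit(RLIMIT_DATA, &rl))
			perror("setrlimit");

		/*
		 * A private, writable, anonymous (non-stack) mapping is a
		 * "data" mapping in the sense of is_data_mapping().  With
		 * ignore_rlimit_data set to 0 this 4 MiB request is expected
		 * to fail with ENOMEM; with the default it succeeds and the
		 * kernel only prints the warning above.
		 */
		p = mmap(NULL, 4 << 20, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			perror("mmap");
		else
			printf("mapped 4 MiB at %p\n", p);
		return 0;
	}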
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 8eb7bb4..f7cb3d4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -160,9 +160,11 @@
 		}
 
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
-			if (next - addr != HPAGE_PMD_SIZE)
+			if (next - addr != HPAGE_PMD_SIZE) {
 				split_huge_pmd(vma, pmd, addr);
-			else {
+				if (pmd_none(*pmd))
+					continue;
+			} else {
 				int nr_ptes = change_huge_pmd(vma, pmd, addr,
 						newprot, prot_numa);
 
diff --git a/mm/mremap.c b/mm/mremap.c
index d77946a..8eeba02 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -210,6 +210,8 @@
 				}
 			}
 			split_huge_pmd(vma, old_pmd, old_addr);
+			if (pmd_none(*old_pmd))
+				continue;
 			VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
 		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63358d9..838ca8bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5210,6 +5210,11 @@
 	pgdat->numabalancing_migrate_nr_pages = 0;
 	pgdat->numabalancing_migrate_next_window = jiffies;
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	spin_lock_init(&pgdat->split_queue_lock);
+	INIT_LIST_HEAD(&pgdat->split_queue);
+	pgdat->split_queue_len = 0;
+#endif
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
 	pgdat_page_ext_init(pgdat);
@@ -6615,7 +6620,7 @@
 	return !has_unmovable_pages(zone, page, 0, true);
 }
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 
 static unsigned long pfn_max_align_down(unsigned long pfn)
 {
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 9d47676..06a005b 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -90,9 +90,9 @@
  * ARCHes with special requirements for evicting THP backing TLB entries can
  * implement this. Otherwise also, it can help optimize normal TLB flush in
  * THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB TLB if flush span is greater than a threshhold, which will
+ * entire TLB if flush span is greater than a threshold, which will
  * likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desitable.
+ * invalidate the entire TLB which is not desirable.
  * e.g. see arch/arc: flush_pmd_tlb_range
  */
 #define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
@@ -195,7 +195,9 @@
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	/* collapse entails shooting down ptes not pmd */
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
 #endif
diff --git a/mm/slab.c b/mm/slab.c
index 6ecc697..621fbcb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,7 +2275,7 @@
 
 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
-		__kmem_cache_shutdown(cachep);
+		__kmem_cache_release(cachep);
 		return err;
 	}
 
@@ -2414,12 +2414,13 @@
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
+	return __kmem_cache_shrink(cachep, false);
+}
+
+void __kmem_cache_release(struct kmem_cache *cachep)
+{
 	int i;
 	struct kmem_cache_node *n;
-	int rc = __kmem_cache_shrink(cachep, false);
-
-	if (rc)
-		return rc;
 
 	free_percpu(cachep->cpu_cache);
 
@@ -2430,7 +2431,6 @@
 		kfree(n);
 		cachep->node[i] = NULL;
 	}
-	return 0;
 }
 
 /*
diff --git a/mm/slab.h b/mm/slab.h
index 834ad24..2eedace 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -140,6 +140,7 @@
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *, bool);
 void slab_kmem_cache_release(struct kmem_cache *);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b50aef0..065b7bd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -693,6 +693,7 @@
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
+	__kmem_cache_release(s);
 	destroy_memcg_params(s);
 	kfree_const(s->name);
 	kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
index 17e8f8c..5ec1580 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -630,6 +630,10 @@
 	return 0;
 }
 
+void __kmem_cache_release(struct kmem_cache *c)
+{
+}
+
 int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
 {
 	return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 2e1355a..d8fbd4a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,18 +1592,12 @@
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -3184,6 +3178,12 @@
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3443,28 +3443,31 @@
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take the list_lock
+ * because a sysfs file might still access the partial list after shutdown.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -3980,7 +3976,7 @@
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }
diff --git a/mm/util.c b/mm/util.c
index c108a65..4fb14ca 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -230,36 +230,11 @@
 }
 
 /* Check if the vma is being used as a stack by this task */
-static int vm_is_stack_for_task(struct task_struct *t,
-				struct vm_area_struct *vma)
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
 {
 	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
-/*
- * Check if the vma is being used as a stack.
- * If is_group is non-zero, check in the entire thread group or else
- * just check in the current task. Returns the task_struct of the task
- * that the vma is stack for. Must be called under rcu_read_lock().
- */
-struct task_struct *task_of_stack(struct task_struct *task,
-				struct vm_area_struct *vma, bool in_group)
-{
-	if (vm_is_stack_for_task(task, vma))
-		return task;
-
-	if (in_group) {
-		struct task_struct *t;
-
-		for_each_thread(task, t) {
-			if (vm_is_stack_for_task(t, vma))
-				return t;
-		}
-	}
-
-	return NULL;
-}
-
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 9a6c070..149fdf6 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -248,9 +248,8 @@
 
 	if (tree) {
 		spin_lock(&vmpr->sr_lock);
-		vmpr->tree_scanned += scanned;
+		scanned = vmpr->tree_scanned += scanned;
 		vmpr->tree_reclaimed += reclaimed;
-		scanned = vmpr->scanned;
 		spin_unlock(&vmpr->sr_lock);
 
 		if (scanned < vmpressure_win)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eb3dd37..71b1c29 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1443,7 +1443,7 @@
 	int ret = -EBUSY;
 
 	VM_BUG_ON_PAGE(!page_count(page), page);
-	VM_BUG_ON_PAGE(PageTail(page), page);
+	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
 
 	if (PageLRU(page)) {
 		struct zone *zone = page_zone(page);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 40b2c74..084c672 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1396,10 +1396,15 @@
 		 * Counters were updated so we expect more updates
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
+		 * If we were marked on cpu_stat_off, clear the flag
+		 * so that vmstat_shepherd doesn't schedule us again.
 		 */
-		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
-			this_cpu_ptr(&vmstat_work),
-			round_jiffies_relative(sysctl_stat_interval));
+		if (!cpumask_test_and_clear_cpu(smp_processor_id(),
+						cpu_stat_off)) {
+			queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+				this_cpu_ptr(&vmstat_work),
+				round_jiffies_relative(sysctl_stat_interval));
+		}
 	} else {
 		/*
 		 * We did not update any counters so the app may be in
@@ -1417,18 +1422,6 @@
  * until the diffs stay at zero. The function is used by NOHZ and can only be
  * invoked when tick processing is not active.
  */
-void quiet_vmstat(void)
-{
-	if (system_state != SYSTEM_RUNNING)
-		return;
-
-	do {
-		if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
-			cancel_delayed_work(this_cpu_ptr(&vmstat_work));
-
-	} while (refresh_cpu_vm_stats(false));
-}
-
 /*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
@@ -1452,6 +1445,30 @@
 	return false;
 }
 
+void quiet_vmstat(void)
+{
+	if (system_state != SYSTEM_RUNNING)
+		return;
+
+	/*
+	 * If we are already in hands of the shepherd then there
+	 * is nothing for us to do here.
+	 */
+	if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+		return;
+
+	if (!need_update(smp_processor_id()))
+		return;
+
+	/*
+	 * Just refresh counters and do not care about the pending delayed
+	 * vmstat_update. It doesn't fire often enough to matter, and canceling
+	 * it would be too expensive from this path.
+	 * vmstat_shepherd will take care about that for us.
+	 */
+	refresh_cpu_vm_stats(false);
+}
+
 
 /*
  * Shepherd worker thread that checks the
@@ -1469,18 +1486,25 @@
 
 	get_online_cpus();
 	/* Check processors whose vmstat worker threads have been disabled */
-	for_each_cpu(cpu, cpu_stat_off)
-		if (need_update(cpu) &&
-			cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
+	for_each_cpu(cpu, cpu_stat_off) {
+		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
-			queue_delayed_work_on(cpu, vmstat_wq,
-				&per_cpu(vmstat_work, cpu), 0);
-
+		if (need_update(cpu)) {
+			if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
+				queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+		} else {
+			/*
+			 * Cancel the work if quiet_vmstat has put this
+			 * cpu on cpu_stat_off because the work item might
+			 * still be scheduled
+			 */
+			cancel_delayed_work(dw);
+		}
+	}
 	put_online_cpus();
 
 	schedule_delayed_work(&shepherd,
 		round_jiffies_relative(sysctl_stat_interval));
-
 }
 
 static void __init start_shepherd_timer(void)
@@ -1488,7 +1512,7 @@
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
+		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
 			vmstat_update);
 
 	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 393bfb2..5fcfb98 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -403,6 +403,7 @@
  * @local_retries: localized retries
  * @local_fallback_retries: localized fallback retries
  * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
+ * @stable: in stable mode, start rep at 0 in the recursive call for all replicas
  * @vary_r: pass r to recursive calls
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  * @parent_r: r value passed from the parent
@@ -419,6 +420,7 @@
 			       unsigned int local_fallback_retries,
 			       int recurse_to_leaf,
 			       unsigned int vary_r,
+			       unsigned int stable,
 			       int *out2,
 			       int parent_r)
 {
@@ -433,13 +435,13 @@
 	int collide, reject;
 	int count = out_size;
 
-	dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n",
+	dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
 		recurse_to_leaf ? "_LEAF" : "",
 		bucket->id, x, outpos, numrep,
 		tries, recurse_tries, local_retries, local_fallback_retries,
-		parent_r);
+		parent_r, stable);
 
-	for (rep = outpos; rep < numrep && count > 0 ; rep++) {
+	for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {
 		/* keep trying until we get a non-out, non-colliding item */
 		ftotal = 0;
 		skip_rep = 0;
@@ -512,13 +514,14 @@
 						if (crush_choose_firstn(map,
 							 map->buckets[-1-item],
 							 weight, weight_max,
-							 x, outpos+1, 0,
+							 x, stable ? 1 : outpos+1, 0,
 							 out2, outpos, count,
 							 recurse_tries, 0,
 							 local_retries,
 							 local_fallback_retries,
 							 0,
 							 vary_r,
+							 stable,
 							 NULL,
 							 sub_r) <= outpos)
 							/* didn't get leaf */
@@ -816,6 +819,7 @@
 	int choose_local_fallback_retries = map->choose_local_fallback_tries;
 
 	int vary_r = map->chooseleaf_vary_r;
+	int stable = map->chooseleaf_stable;
 
 	if ((__u32)ruleno >= map->max_rules) {
 		dprintk(" bad ruleno %d\n", ruleno);
@@ -835,7 +839,8 @@
 		case CRUSH_RULE_TAKE:
 			if ((curstep->arg1 >= 0 &&
 			     curstep->arg1 < map->max_devices) ||
-			    (-1-curstep->arg1 < map->max_buckets &&
+			    (-1-curstep->arg1 >= 0 &&
+			     -1-curstep->arg1 < map->max_buckets &&
 			     map->buckets[-1-curstep->arg1])) {
 				w[0] = curstep->arg1;
 				wsize = 1;
@@ -869,6 +874,11 @@
 				vary_r = curstep->arg1;
 			break;
 
+		case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
+			if (curstep->arg1 >= 0)
+				stable = curstep->arg1;
+			break;
+
 		case CRUSH_RULE_CHOOSELEAF_FIRSTN:
 		case CRUSH_RULE_CHOOSE_FIRSTN:
 			firstn = 1;
@@ -888,6 +898,7 @@
 			osize = 0;
 
 			for (i = 0; i < wsize; i++) {
+				int bno;
 				/*
 				 * see CRUSH_N, CRUSH_N_MINUS macros.
 				 * basically, numrep <= 0 means relative to
@@ -900,6 +911,13 @@
 						continue;
 				}
 				j = 0;
+				/* make sure bucket id is valid */
+				bno = -1 - w[i];
+				if (bno < 0 || bno >= map->max_buckets) {
+					/* w[i] is probably CRUSH_ITEM_NONE */
+					dprintk("  bad w[i] %d\n", w[i]);
+					continue;
+				}
 				if (firstn) {
 					int recurse_tries;
 					if (choose_leaf_tries)
@@ -911,7 +929,7 @@
 						recurse_tries = choose_tries;
 					osize += crush_choose_firstn(
 						map,
-						map->buckets[-1-w[i]],
+						map->buckets[bno],
 						weight, weight_max,
 						x, numrep,
 						curstep->arg2,
@@ -923,6 +941,7 @@
 						choose_local_fallback_retries,
 						recurse_to_leaf,
 						vary_r,
+						stable,
 						c+osize,
 						0);
 				} else {
@@ -930,7 +949,7 @@
 						    numrep : (result_max-osize));
 					crush_choose_indep(
 						map,
-						map->buckets[-1-w[i]],
+						map->buckets[bno],
 						weight, weight_max,
 						x, out_size, numrep,
 						curstep->arg2,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f8f2359..3534e12 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1770,6 +1770,7 @@
 	u32 osdmap_epoch;
 	int already_completed;
 	u32 bytes;
+	u8 decode_redir;
 	unsigned int i;
 
 	tid = le64_to_cpu(msg->hdr.tid);
@@ -1841,6 +1842,15 @@
 		p += 8 + 4; /* skip replay_version */
 		p += 8; /* skip user_version */
 
+		if (le16_to_cpu(msg->hdr.version) >= 7)
+			ceph_decode_8_safe(&p, end, decode_redir, bad_put);
+		else
+			decode_redir = 1;
+	} else {
+		decode_redir = 0;
+	}
+
+	if (decode_redir) {
 		err = ceph_redirect_decode(&p, end, &redir);
 		if (err)
 			goto bad_put;
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 7d8f581..243574c 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -342,23 +342,32 @@
         c->choose_local_tries = ceph_decode_32(p);
         c->choose_local_fallback_tries =  ceph_decode_32(p);
         c->choose_total_tries = ceph_decode_32(p);
-        dout("crush decode tunable choose_local_tries = %d",
+        dout("crush decode tunable choose_local_tries = %d\n",
              c->choose_local_tries);
-        dout("crush decode tunable choose_local_fallback_tries = %d",
+        dout("crush decode tunable choose_local_fallback_tries = %d\n",
              c->choose_local_fallback_tries);
-        dout("crush decode tunable choose_total_tries = %d",
+        dout("crush decode tunable choose_total_tries = %d\n",
              c->choose_total_tries);
 
 	ceph_decode_need(p, end, sizeof(u32), done);
 	c->chooseleaf_descend_once = ceph_decode_32(p);
-	dout("crush decode tunable chooseleaf_descend_once = %d",
+	dout("crush decode tunable chooseleaf_descend_once = %d\n",
 	     c->chooseleaf_descend_once);
 
 	ceph_decode_need(p, end, sizeof(u8), done);
 	c->chooseleaf_vary_r = ceph_decode_8(p);
-	dout("crush decode tunable chooseleaf_vary_r = %d",
+	dout("crush decode tunable chooseleaf_vary_r = %d\n",
 	     c->chooseleaf_vary_r);
 
+	/* skip straw_calc_version, allowed_bucket_algs */
+	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
+	*p += sizeof(u8) + sizeof(u32);
+
+	ceph_decode_need(p, end, sizeof(u8), done);
+	c->chooseleaf_stable = ceph_decode_8(p);
+	dout("crush decode tunable chooseleaf_stable = %d\n",
+	     c->chooseleaf_stable);
+
 done:
 	dout("crush_decode success\n");
 	return c;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d79699c..eab81bc 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -208,7 +208,6 @@
 	case htons(ETH_P_IPV6): {
 		const struct ipv6hdr *iph;
 		struct ipv6hdr _iph;
-		__be32 flow_label;
 
 ipv6:
 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@
 			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 		}
 
-		flow_label = ip6_flowlabel(iph);
-		if (flow_label) {
+		if ((dissector_uses_key(flow_dissector,
+					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+		    ip6_flowlabel(iph)) {
+			__be32 flow_label = ip6_flowlabel(iph);
+
 			if (dissector_uses_key(flow_dissector,
 					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
 				key_tags = skb_flow_dissector_target(flow_dissector,
diff --git a/net/core/scm.c b/net/core/scm.c
index 14596fb..2696aef 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -87,6 +87,7 @@
 		*fplp = fpl;
 		fpl->count = 0;
 		fpl->max = SCM_MAX_FD;
+		fpl->user = NULL;
 	}
 	fpp = &fpl->fp[fpl->count];
 
@@ -107,6 +108,10 @@
 		*fpp++ = file;
 		fpl->count++;
 	}
+
+	if (!fpl->user)
+		fpl->user = get_uid(current_user());
+
 	return num;
 }
 
@@ -119,6 +124,7 @@
 		scm->fp = NULL;
 		for (i=fpl->count-1; i>=0; i--)
 			fput(fpl->fp[i]);
+		free_uid(fpl->user);
 		kfree(fpl);
 	}
 }
@@ -336,6 +342,7 @@
 		for (i = 0; i < fpl->count; i++)
 			get_file(fpl->fp[i]);
 		new_fpl->max = new_fpl->count;
+		new_fpl->user = get_uid(fpl->user);
 	}
 	return new_fpl;
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b2df375..5bf88f5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,8 @@
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
 
 /**
  *	skb_panic - private function for out-of-line support
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 95b6139..a6beb7b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -26,6 +26,7 @@
 static int one = 1;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
 
 static int net_msg_warn;	/* Unused, but still a sysctl */
 
@@ -392,6 +393,15 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "max_skb_frags",
+		.data		= &sysctl_max_skb_frags,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &max_skb_frags,
+	},
 	{ }
 };
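
The new entry is exposed as /proc/sys/net/core/max_skb_frags and is clamped to 1..MAX_SKB_FRAGS by proc_dointvec_minmax. For illustration only, a small C check that the knob is visible (assuming the usual /proc/sys mount; the program is not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/core/max_skb_frags", "r");
		int val;

		if (!f) {
			perror("max_skb_frags");	/* e.g. an older kernel without the knob */
			return 1;
		}
		if (fscanf(f, "%d", &val) == 1)
			printf("max_skb_frags = %d\n", val);
		fclose(f);
		return 0;
	}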
 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 7c51c4e..56fdf4e0d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1240,6 +1240,14 @@
 	err = ipgre_newlink(net, dev, tb, NULL);
 	if (err < 0)
 		goto out;
+
+	/* openvswitch users expect packet sizes to be unrestricted,
+	 * so set the largest MTU we can.
+	 */
+	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
+	if (err)
+		goto out;
+
 	return dev;
 out:
 	free_netdev(dev);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c7bd72e..89e8861 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -943,17 +943,31 @@
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
 
-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
 
-	if (new_mtu < 68 ||
-	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
+	if (new_mtu < 68)
 		return -EINVAL;
+
+	if (new_mtu > max_mtu) {
+		if (strict)
+			return -EINVAL;
+
+		new_mtu = max_mtu;
+	}
+
 	dev->mtu = new_mtu;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
+
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+	return __ip_tunnel_change_mtu(dev, new_mtu, true);
+}
 EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
 
 static void ip_tunnel_dev_free(struct net_device *dev)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19746b3..0c36ef4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -940,7 +940,7 @@
 
 		i = skb_shinfo(skb)->nr_frags;
 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
-		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+		if (!can_coalesce && i >= sysctl_max_skb_frags) {
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
@@ -1213,7 +1213,7 @@
 
 			if (!skb_can_coalesce(skb, i, pfrag->page,
 					      pfrag->offset)) {
-				if (i == MAX_SKB_FRAGS || !sg) {
+				if (i == sysctl_max_skb_frags || !sg) {
 					tcp_mark_push(tp, skb);
 					goto new_segment;
 				}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a4d5237..7f6ff03 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -311,7 +311,7 @@
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
 	struct request_sock *req = inet_reqsk(sk);
 	struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@
 
 	if (seq != tcp_rsk(req)->snt_isn) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-	} else {
+	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
 		 * There is no good way to pass the error to the newly
@@ -383,7 +383,12 @@
 	}
 	seq = ntohl(th->seq);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq);
+		return tcp_req_err(sk, seq,
+				  type == ICMP_PARAMETERPROB ||
+				  type == ICMP_TIME_EXCEEDED ||
+				  (type == ICMP_DEST_UNREACH &&
+				   (code == ICMP_NET_UNREACH ||
+				    code == ICMP_HOST_UNREACH)));
 
 	bh_lock_sock(sk);
 	/* If too many ICMPs get dropped on busy
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 38eedde..9efd9ff 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3538,6 +3538,7 @@
 {
 	struct inet6_dev *idev = ifp->idev;
 	struct net_device *dev = idev->dev;
+	bool notify = false;
 
 	addrconf_join_solict(dev, &ifp->addr);
 
@@ -3583,7 +3584,7 @@
 			/* Because optimistic nodes can use this address,
 			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
 			 */
-			ipv6_ifa_notify(RTM_NEWADDR, ifp);
+			notify = true;
 		}
 	}
 
@@ -3591,6 +3592,8 @@
 out:
 	spin_unlock(&ifp->lock);
 	read_unlock_bh(&idev->lock);
+	if (notify)
+		ipv6_ifa_notify(RTM_NEWADDR, ifp);
 }
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 1f9ebe3..dc2db4f 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -540,12 +540,13 @@
 		}
 		spin_lock_bh(&ip6_sk_fl_lock);
 		for (sflp = &np->ipv6_fl_list;
-		     (sfl = rcu_dereference(*sflp)) != NULL;
+		     (sfl = rcu_dereference_protected(*sflp,
+						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
 		     sflp = &sfl->next) {
 			if (sfl->fl->label == freq.flr_label) {
 				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
 					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-				*sflp = rcu_dereference(sfl->next);
+				*sflp = sfl->next;
 				spin_unlock_bh(&ip6_sk_fl_lock);
 				fl_release(sfl->fl);
 				kfree_rcu(sfl, rcu);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 006396e..1a5a70f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -327,6 +327,7 @@
 	struct tcp_sock *tp;
 	__u32 seq, snd_una;
 	struct sock *sk;
+	bool fatal;
 	int err;
 
 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -345,8 +346,9 @@
 		return;
 	}
 	seq = ntohl(th->seq);
+	fatal = icmpv6_err_convert(type, code, &err);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq);
+		return tcp_req_err(sk, seq, fatal);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -400,7 +402,6 @@
 		goto out;
 	}
 
-	icmpv6_err_convert(type, code, &err);
 
 	/* Might be for an request_sock */
 	switch (sk->sk_state) {
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 1605691..de9cb19 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -91,6 +91,8 @@
 	struct vxlan_config conf = {
 		.no_share = true,
 		.flags = VXLAN_F_COLLECT_METADATA,
+		/* Don't restrict the packets that can be sent by MTU */
+		.mtu = IP_MAX_MTU,
 	};
 
 	if (!options) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5ca2ebf..e878da0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5538,6 +5538,7 @@
 	struct sctp_hmac_algo_param *hmacs;
 	__u16 data_len = 0;
 	u32 num_idents;
+	int i;
 
 	if (!ep->auth_enable)
 		return -EACCES;
@@ -5555,8 +5556,12 @@
 		return -EFAULT;
 	if (put_user(num_idents, &p->shmac_num_idents))
 		return -EFAULT;
-	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
-		return -EFAULT;
+	for (i = 0; i < num_idents; i++) {
+		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+			return -EFAULT;
+	}
 	return 0;
 }
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 49d5093..29be035 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1496,7 +1496,7 @@
 	UNIXCB(skb).fp = NULL;
 
 	for (i = scm->fp->count-1; i >= 0; i--)
-		unix_notinflight(scm->fp->fp[i]);
+		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
@@ -1561,7 +1561,7 @@
 		return -ENOMEM;
 
 	for (i = scm->fp->count - 1; i >= 0; i--)
-		unix_inflight(scm->fp->fp[i]);
+		unix_inflight(scm->fp->user, scm->fp->fp[i]);
 	return max_level;
 }
 
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 8fcdc22..6a0d485 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -116,7 +116,7 @@
  * descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
 
@@ -133,11 +133,11 @@
 		}
 		unix_tot_inflight++;
 	}
-	fp->f_cred->user->unix_inflight++;
+	user->unix_inflight++;
 	spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
 
@@ -152,7 +152,7 @@
 			list_del_init(&u->link);
 		unix_tot_inflight--;
 	}
-	fp->f_cred->user->unix_inflight--;
+	user->unix_inflight--;
 	spin_unlock(&unix_gc_lock);
 }
 
diff --git a/scripts/prune-kernel b/scripts/prune-kernel
new file mode 100755
index 0000000..ab5034e
--- /dev/null
+++ b/scripts/prune-kernel
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Because I use CONFIG_LOCALVERSION_AUTO, each build gets a new version string
+# rather than reusing the same one, so /boot and /lib/modules/ eventually fill up.
+# Dumb script to purge that stuff:
+
+for f in "$@"
+do
+        if rpm -qf "/lib/modules/$f" >/dev/null; then
+                echo "keeping $f (installed from rpm)"
+        elif [ "$(uname -r)" = "$f" ]; then
+                echo "keeping $f (running kernel)"
+        else
+                echo "removing $f"
+                rm -f "/boot/initramfs-$f.img" "/boot/System.map-$f"
+                rm -f "/boot/vmlinuz-$f"   "/boot/config-$f"
+                rm -rf "/lib/modules/$f"
+                new-kernel-pkg --remove "$f"
+        fi
+done
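
Typical use is to pass the stale release strings explicitly, e.g. "scripts/prune-kernel 4.4.0-rc3+ 4.4.0-rc4+" (hypothetical version names); anything owned by an rpm or matching the running kernel is kept.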
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index f716025..e6ea9d4 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -23,6 +23,7 @@
 #include <linux/integrity.h>
 #include <linux/evm.h>
 #include <crypto/hash.h>
+#include <crypto/algapi.h>
 #include "evm.h"
 
 int evm_initialized;
@@ -148,7 +149,7 @@
 				   xattr_value_len, calc.digest);
 		if (rc)
 			break;
-		rc = memcmp(xattr_data->digest, calc.digest,
+		rc = crypto_memneq(xattr_data->digest, calc.digest,
 			    sizeof(calc.digest));
 		if (rc)
 			rc = -EINVAL;
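
The switch from memcmp() to crypto_memneq() makes the digest comparison constant-time, so its running time no longer reveals how many leading bytes matched. For illustration only, a userspace sketch of the same idea; this is a simplified stand-in, not the kernel's crypto_memneq(), which additionally defends against compiler optimizations:

	#include <stdio.h>

	/*
	 * Simplified constant-time inequality test: accumulate the XOR of
	 * every byte instead of returning at the first mismatch, so the
	 * loop always touches all n bytes.
	 */
	static int ct_memneq(const void *a, const void *b, size_t n)
	{
		const unsigned char *pa = a, *pb = b;
		unsigned char diff = 0;
		size_t i;

		for (i = 0; i < n; i++)
			diff |= pa[i] ^ pb[i];
		return diff != 0;
	}

	int main(void)
	{
		unsigned char x[20] = "aaaaaaaaaaaaaaaaaaa";
		unsigned char y[20] = "aaaaaaaaaaaaaaaaaab";

		printf("neq: %d\n", ct_memneq(x, y, sizeof(x)));
		return 0;
	}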
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 2bbb418..8495b93 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -83,6 +83,7 @@
 	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ DCCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ SOCK_DIAG_BY_FAMILY,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ SOCK_DESTROY,		NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 0e73d03..ebc9fdf 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -835,7 +835,8 @@
 	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
 }
 
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+				     bool trylock)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_pcm_hw_params *params, *sparams;
@@ -849,7 +850,10 @@
 	struct snd_mask sformat_mask;
 	struct snd_mask mask;
 
-	if (mutex_lock_interruptible(&runtime->oss.params_lock))
+	if (trylock) {
+		if (!(mutex_trylock(&runtime->oss.params_lock)))
+			return -EAGAIN;
+	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
 		return -EINTR;
 	sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
 	params = kmalloc(sizeof(*params), GFP_KERNEL);
@@ -1092,7 +1096,7 @@
 		if (asubstream == NULL)
 			asubstream = substream;
 		if (substream->runtime->oss.params) {
-			err = snd_pcm_oss_change_params(substream);
+			err = snd_pcm_oss_change_params(substream, false);
 			if (err < 0)
 				return err;
 		}
@@ -1132,7 +1136,7 @@
 		return 0;
 	runtime = substream->runtime;
 	if (runtime->oss.params) {
-		err = snd_pcm_oss_change_params(substream);
+		err = snd_pcm_oss_change_params(substream, false);
 		if (err < 0)
 			return err;
 	}
@@ -2163,7 +2167,7 @@
 	runtime = substream->runtime;
 
 	if (runtime->oss.params &&
-	    (err = snd_pcm_oss_change_params(substream)) < 0)
+	    (err = snd_pcm_oss_change_params(substream, false)) < 0)
 		return err;
 
 	info.fragsize = runtime->oss.period_bytes;
@@ -2804,7 +2808,12 @@
 		return -EIO;
 	
 	if (runtime->oss.params) {
-		if ((err = snd_pcm_oss_change_params(substream)) < 0)
+		/* use mutex_trylock() for params_lock to avoid a deadlock
+		 * between mmap_sem and params_lock taken by
+		 * copy_from/to_user() in snd_pcm_oss_write/read()
+		 */
+		err = snd_pcm_oss_change_params(substream, true);
+		if (err < 0)
 			return err;
 	}
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
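
The trylock above breaks a potential ABBA deadlock: the mmap path reaches this point with mmap_sem already held and then needs params_lock, while the read/write path takes params_lock and then faults in user memory under mmap_sem via copy_from/to_user(). For illustration, a self-contained pthreads sketch of the general trylock-and-back-off pattern (generic lock names, not the kernel code):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	/* Path 1 takes the locks in the "normal" order, A then B. */
	static void path1(void)
	{
		pthread_mutex_lock(&lock_a);
		pthread_mutex_lock(&lock_b);
		/* ... work under both locks ... */
		pthread_mutex_unlock(&lock_b);
		pthread_mutex_unlock(&lock_a);
	}

	/* Path 2 already holds B, so it must not block on A: try and back off. */
	static int path2(void)
	{
		int ret = 0;

		pthread_mutex_lock(&lock_b);
		if (pthread_mutex_trylock(&lock_a) != 0) {
			ret = -EAGAIN;		/* caller retries later */
		} else {
			/* ... work under both locks ... */
			pthread_mutex_unlock(&lock_a);
		}
		pthread_mutex_unlock(&lock_b);
		return ret;
	}

	int main(void)
	{
		path1();
		printf("path2: %d\n", path2());
		return 0;
	}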
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index fadd3eb..9106d8e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -74,6 +74,18 @@
 static DEFINE_RWLOCK(snd_pcm_link_rwlock);
 static DECLARE_RWSEM(snd_pcm_link_rwsem);
 
+/* A writer in an rwsem may block readers even while it is waiting in the
+ * queue, and this may lead to a deadlock when a code path takes the read
+ * sem twice (e.g. once in snd_pcm_action_nonatomic() and again in
+ * snd_pcm_stream_lock()).  As a (suboptimal) workaround, let the writer
+ * spin until it gets the lock.
+ */
+static inline void down_write_nonblock(struct rw_semaphore *lock)
+{
+	while (!down_write_trylock(lock))
+		cond_resched();
+}
+
 /**
  * snd_pcm_stream_lock - Lock the PCM stream
  * @substream: PCM substream
@@ -1813,7 +1825,7 @@
 		res = -ENOMEM;
 		goto _nolock;
 	}
-	down_write(&snd_pcm_link_rwsem);
+	down_write_nonblock(&snd_pcm_link_rwsem);
 	write_lock_irq(&snd_pcm_link_rwlock);
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
 	    substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1860,7 +1872,7 @@
 	struct snd_pcm_substream *s;
 	int res = 0;
 
-	down_write(&snd_pcm_link_rwsem);
+	down_write_nonblock(&snd_pcm_link_rwsem);
 	write_lock_irq(&snd_pcm_link_rwlock);
 	if (!snd_pcm_stream_linked(substream)) {
 		res = -EALREADY;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index a775984..795437b 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -942,31 +942,36 @@
 	unsigned long flags;
 	long result = 0, count1;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long appl_ptr;
 
+	spin_lock_irqsave(&runtime->lock, flags);
 	while (count > 0 && runtime->avail) {
 		count1 = runtime->buffer_size - runtime->appl_ptr;
 		if (count1 > count)
 			count1 = count;
-		spin_lock_irqsave(&runtime->lock, flags);
 		if (count1 > (int)runtime->avail)
 			count1 = runtime->avail;
+
+		/* update runtime->appl_ptr before unlocking for userbuf */
+		appl_ptr = runtime->appl_ptr;
+		runtime->appl_ptr += count1;
+		runtime->appl_ptr %= runtime->buffer_size;
+		runtime->avail -= count1;
+
 		if (kernelbuf)
-			memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
+			memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
 		if (userbuf) {
 			spin_unlock_irqrestore(&runtime->lock, flags);
 			if (copy_to_user(userbuf + result,
-					 runtime->buffer + runtime->appl_ptr, count1)) {
+					 runtime->buffer + appl_ptr, count1)) {
 				return result > 0 ? result : -EFAULT;
 			}
 			spin_lock_irqsave(&runtime->lock, flags);
 		}
-		runtime->appl_ptr += count1;
-		runtime->appl_ptr %= runtime->buffer_size;
-		runtime->avail -= count1;
-		spin_unlock_irqrestore(&runtime->lock, flags);
 		result += count1;
 		count -= count1;
 	}
+	spin_unlock_irqrestore(&runtime->lock, flags);
 	return result;
 }
 
@@ -1055,23 +1060,16 @@
 EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
 
 /**
- * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
  * @substream: the rawmidi substream
  * @buffer: the buffer pointer
  * @count: data size to transfer
  *
- * Copies data from the internal output buffer to the given buffer.
- *
- * Call this in the interrupt handler when the midi output is ready,
- * and call snd_rawmidi_transmit_ack() after the transmission is
- * finished.
- *
- * Return: The size of copied data, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
  */
-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
 			      unsigned char *buffer, int count)
 {
-	unsigned long flags;
 	int result, count1;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
 
@@ -1081,7 +1079,6 @@
 		return -EINVAL;
 	}
 	result = 0;
-	spin_lock_irqsave(&runtime->lock, flags);
 	if (runtime->avail >= runtime->buffer_size) {
 		/* warning: lowlevel layer MUST trigger down the hardware */
 		goto __skip;
@@ -1106,12 +1103,68 @@
 		}
 	}
       __skip:
+	return result;
+}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
+
+/**
+ * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * @substream: the rawmidi substream
+ * @buffer: the buffer pointer
+ * @count: data size to transfer
+ *
+ * Copies data from the internal output buffer to the given buffer.
+ *
+ * Call this in the interrupt handler when the midi output is ready,
+ * and call snd_rawmidi_transmit_ack() after the transmission is
+ * finished.
+ *
+ * Return: The size of copied data, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+			      unsigned char *buffer, int count)
+{
+	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&runtime->lock, flags);
+	result = __snd_rawmidi_transmit_peek(substream, buffer, count);
 	spin_unlock_irqrestore(&runtime->lock, flags);
 	return result;
 }
 EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
 
 /**
+ * __snd_rawmidi_transmit_ack - acknowledge the transmission
+ * @substream: the rawmidi substream
+ * @count: the transferred count
+ *
+ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
+ */
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+{
+	struct snd_rawmidi_runtime *runtime = substream->runtime;
+
+	if (runtime->buffer == NULL) {
+		rmidi_dbg(substream->rmidi,
+			  "snd_rawmidi_transmit_ack: output is not active!!!\n");
+		return -EINVAL;
+	}
+	snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
+	runtime->hw_ptr += count;
+	runtime->hw_ptr %= runtime->buffer_size;
+	runtime->avail += count;
+	substream->bytes += count;
+	if (count > 0) {
+		if (runtime->drain || snd_rawmidi_ready(substream))
+			wake_up(&runtime->sleep);
+	}
+	return count;
+}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
+
+/**
  * snd_rawmidi_transmit_ack - acknowledge the transmission
  * @substream: the rawmidi substream
  * @count: the transferred count
@@ -1124,26 +1177,14 @@
  */
 int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
 {
-	unsigned long flags;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	int result;
+	unsigned long flags;
 
-	if (runtime->buffer == NULL) {
-		rmidi_dbg(substream->rmidi,
-			  "snd_rawmidi_transmit_ack: output is not active!!!\n");
-		return -EINVAL;
-	}
 	spin_lock_irqsave(&runtime->lock, flags);
-	snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
-	runtime->hw_ptr += count;
-	runtime->hw_ptr %= runtime->buffer_size;
-	runtime->avail += count;
-	substream->bytes += count;
-	if (count > 0) {
-		if (runtime->drain || snd_rawmidi_ready(substream))
-			wake_up(&runtime->sleep);
-	}
+	result = __snd_rawmidi_transmit_ack(substream, count);
 	spin_unlock_irqrestore(&runtime->lock, flags);
-	return count;
+	return result;
 }
 EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
 
@@ -1160,12 +1201,22 @@
 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
 			 unsigned char *buffer, int count)
 {
+	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&runtime->lock, flags);
 	if (!substream->opened)
-		return -EBADFD;
-	count = snd_rawmidi_transmit_peek(substream, buffer, count);
-	if (count < 0)
-		return count;
-	return snd_rawmidi_transmit_ack(substream, count);
+		result = -EBADFD;
+	else {
+		count = __snd_rawmidi_transmit_peek(substream, buffer, count);
+		if (count <= 0)
+			result = count;
+		else
+			result = __snd_rawmidi_transmit_ack(substream, count);
+	}
+	spin_unlock_irqrestore(&runtime->lock, flags);
+	return result;
 }
 EXPORT_SYMBOL(snd_rawmidi_transmit);
 
@@ -1177,8 +1228,9 @@
 	unsigned long flags;
 	long count1, result;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long appl_ptr;
 
-	if (snd_BUG_ON(!kernelbuf && !userbuf))
+	if (!kernelbuf && !userbuf)
 		return -EINVAL;
 	if (snd_BUG_ON(!runtime->buffer))
 		return -EINVAL;
@@ -1197,12 +1249,19 @@
 			count1 = count;
 		if (count1 > (long)runtime->avail)
 			count1 = runtime->avail;
+
+		/* update runtime->appl_ptr before unlocking for userbuf */
+		appl_ptr = runtime->appl_ptr;
+		runtime->appl_ptr += count1;
+		runtime->appl_ptr %= runtime->buffer_size;
+		runtime->avail -= count1;
+
 		if (kernelbuf)
-			memcpy(runtime->buffer + runtime->appl_ptr,
+			memcpy(runtime->buffer + appl_ptr,
 			       kernelbuf + result, count1);
 		else if (userbuf) {
 			spin_unlock_irqrestore(&runtime->lock, flags);
-			if (copy_from_user(runtime->buffer + runtime->appl_ptr,
+			if (copy_from_user(runtime->buffer + appl_ptr,
 					   userbuf + result, count1)) {
 				spin_lock_irqsave(&runtime->lock, flags);
 				result = result > 0 ? result : -EFAULT;
@@ -1210,9 +1269,6 @@
 			}
 			spin_lock_irqsave(&runtime->lock, flags);
 		}
-		runtime->appl_ptr += count1;
-		runtime->appl_ptr %= runtime->buffer_size;
-		runtime->avail -= count1;
 		result += count1;
 		count -= count1;
 	}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 13cfa81..58e79e0 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -678,6 +678,9 @@
 	else
 		down_read(&grp->list_mutex);
 	list_for_each_entry(subs, &grp->list_head, src_list) {
+		/* both ports ready? */
+		if (atomic_read(&subs->ref_count) != 2)
+			continue;
 		event->dest = subs->info.dest;
 		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
 			/* convert time according to flag with subscription */
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 8010766..c850345 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -383,15 +383,20 @@
 
 	if (snd_BUG_ON(!pool))
 		return -EINVAL;
-	if (pool->ptr)			/* should be atomic? */
-		return 0;
 
-	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
-	if (!pool->ptr)
+	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+	if (!cellptr)
 		return -ENOMEM;
 
 	/* add new cells to the free cell list */
 	spin_lock_irqsave(&pool->lock, flags);
+	if (pool->ptr) {
+		spin_unlock_irqrestore(&pool->lock, flags);
+		vfree(cellptr);
+		return 0;
+	}
+
+	pool->ptr = cellptr;
 	pool->free = NULL;
 
 	for (cell = 0; cell < pool->size; cell++) {
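
The snd_seq_pool_init() change above moves the vmalloc() out from under pool->lock and rechecks pool->ptr once the lock is taken, freeing the freshly allocated buffer if another caller installed one first. A rough user-space sketch of that allocate-outside-the-lock pattern, with invented names:

/* "Allocate outside the lock, install under it, free the loser." */
#include <pthread.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	void *ptr;		/* installed buffer, NULL until init */
	size_t size;
};

static int pool_init(struct pool *p)
{
	void *mem = malloc(p->size);	/* may sleep/fail; lock not held */

	if (!mem)
		return -1;

	pthread_mutex_lock(&p->lock);
	if (p->ptr) {			/* someone else won the race */
		pthread_mutex_unlock(&p->lock);
		free(mem);
		return 0;
	}
	p->ptr = mem;
	pthread_mutex_unlock(&p->lock);
	return 0;
}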
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 55170a2..fe686ee 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -173,10 +173,6 @@
 }
 
 /* */
-enum group_type {
-	SRC_LIST, DEST_LIST
-};
-
 static int subscribe_port(struct snd_seq_client *client,
 			  struct snd_seq_client_port *port,
 			  struct snd_seq_port_subs_info *grp,
@@ -203,6 +199,20 @@
 	return NULL;
 }
 
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+					struct snd_seq_client_port *port,
+					struct snd_seq_subscribers *subs,
+					bool is_src, bool ack);
+
+static inline struct snd_seq_subscribers *
+get_subscriber(struct list_head *p, bool is_src)
+{
+	if (is_src)
+		return list_entry(p, struct snd_seq_subscribers, src_list);
+	else
+		return list_entry(p, struct snd_seq_subscribers, dest_list);
+}
+
 /*
  * remove all subscribers on the list
  * this is called from port_delete, for each src and dest list.
@@ -210,7 +220,7 @@
 static void clear_subscriber_list(struct snd_seq_client *client,
 				  struct snd_seq_client_port *port,
 				  struct snd_seq_port_subs_info *grp,
-				  int grptype)
+				  int is_src)
 {
 	struct list_head *p, *n;
 
@@ -219,15 +229,13 @@
 		struct snd_seq_client *c;
 		struct snd_seq_client_port *aport;
 
-		if (grptype == SRC_LIST) {
-			subs = list_entry(p, struct snd_seq_subscribers, src_list);
+		subs = get_subscriber(p, is_src);
+		if (is_src)
 			aport = get_client_port(&subs->info.dest, &c);
-		} else {
-			subs = list_entry(p, struct snd_seq_subscribers, dest_list);
+		else
 			aport = get_client_port(&subs->info.sender, &c);
-		}
-		list_del(p);
-		unsubscribe_port(client, port, grp, &subs->info, 0);
+		delete_and_unsubscribe_port(client, port, subs, is_src, false);
+
 		if (!aport) {
 			/* looks like the connected port is being deleted.
 			 * we decrease the counter, and when both ports are deleted
@@ -235,21 +243,14 @@
 			 */
 			if (atomic_dec_and_test(&subs->ref_count))
 				kfree(subs);
-		} else {
-			/* ok we got the connected port */
-			struct snd_seq_port_subs_info *agrp;
-			agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
-			down_write(&agrp->list_mutex);
-			if (grptype == SRC_LIST)
-				list_del(&subs->dest_list);
-			else
-				list_del(&subs->src_list);
-			up_write(&agrp->list_mutex);
-			unsubscribe_port(c, aport, agrp, &subs->info, 1);
-			kfree(subs);
-			snd_seq_port_unlock(aport);
-			snd_seq_client_unlock(c);
+			continue;
 		}
+
+		/* ok we got the connected port */
+		delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
+		kfree(subs);
+		snd_seq_port_unlock(aport);
+		snd_seq_client_unlock(c);
 	}
 }
 
@@ -262,8 +263,8 @@
 	snd_use_lock_sync(&port->use_lock); 
 
 	/* clear subscribers info */
-	clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
-	clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
+	clear_subscriber_list(client, port, &port->c_src, true);
+	clear_subscriber_list(client, port, &port->c_dest, false);
 
 	if (port->private_free)
 		port->private_free(port->private_data);
@@ -479,6 +480,78 @@
 	return 0;
 }
 
+static int check_and_subscribe_port(struct snd_seq_client *client,
+				    struct snd_seq_client_port *port,
+				    struct snd_seq_subscribers *subs,
+				    bool is_src, bool exclusive, bool ack)
+{
+	struct snd_seq_port_subs_info *grp;
+	struct list_head *p;
+	struct snd_seq_subscribers *s;
+	int err;
+
+	grp = is_src ? &port->c_src : &port->c_dest;
+	err = -EBUSY;
+	down_write(&grp->list_mutex);
+	if (exclusive) {
+		if (!list_empty(&grp->list_head))
+			goto __error;
+	} else {
+		if (grp->exclusive)
+			goto __error;
+		/* check whether already exists */
+		list_for_each(p, &grp->list_head) {
+			s = get_subscriber(p, is_src);
+			if (match_subs_info(&subs->info, &s->info))
+				goto __error;
+		}
+	}
+
+	err = subscribe_port(client, port, grp, &subs->info, ack);
+	if (err < 0) {
+		grp->exclusive = 0;
+		goto __error;
+	}
+
+	/* add to list */
+	write_lock_irq(&grp->list_lock);
+	if (is_src)
+		list_add_tail(&subs->src_list, &grp->list_head);
+	else
+		list_add_tail(&subs->dest_list, &grp->list_head);
+	grp->exclusive = exclusive;
+	atomic_inc(&subs->ref_count);
+	write_unlock_irq(&grp->list_lock);
+	err = 0;
+
+ __error:
+	up_write(&grp->list_mutex);
+	return err;
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+					struct snd_seq_client_port *port,
+					struct snd_seq_subscribers *subs,
+					bool is_src, bool ack)
+{
+	struct snd_seq_port_subs_info *grp;
+	struct list_head *list;
+	bool empty;
+
+	grp = is_src ? &port->c_src : &port->c_dest;
+	list = is_src ? &subs->src_list : &subs->dest_list;
+	down_write(&grp->list_mutex);
+	write_lock_irq(&grp->list_lock);
+	empty = list_empty(list);
+	if (!empty)
+		list_del_init(list);
+	grp->exclusive = 0;
+	write_unlock_irq(&grp->list_lock);
+	up_write(&grp->list_mutex);
+
+	if (!empty)
+		unsubscribe_port(client, port, grp, &subs->info, ack);
+}
 
 /* connect two ports */
 int snd_seq_port_connect(struct snd_seq_client *connector,
@@ -488,76 +561,42 @@
 			 struct snd_seq_client_port *dest_port,
 			 struct snd_seq_port_subscribe *info)
 {
-	struct snd_seq_port_subs_info *src = &src_port->c_src;
-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
-	struct snd_seq_subscribers *subs, *s;
-	int err, src_called = 0;
-	unsigned long flags;
-	int exclusive;
+	struct snd_seq_subscribers *subs;
+	bool exclusive;
+	int err;
 
 	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
-	if (! subs)
+	if (!subs)
 		return -ENOMEM;
 
 	subs->info = *info;
-	atomic_set(&subs->ref_count, 2);
+	atomic_set(&subs->ref_count, 0);
+	INIT_LIST_HEAD(&subs->src_list);
+	INIT_LIST_HEAD(&subs->dest_list);
 
-	down_write(&src->list_mutex);
-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+	exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
 
-	exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
-	err = -EBUSY;
-	if (exclusive) {
-		if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
-			goto __error;
-	} else {
-		if (src->exclusive || dest->exclusive)
-			goto __error;
-		/* check whether already exists */
-		list_for_each_entry(s, &src->list_head, src_list) {
-			if (match_subs_info(info, &s->info))
-				goto __error;
-		}
-		list_for_each_entry(s, &dest->list_head, dest_list) {
-			if (match_subs_info(info, &s->info))
-				goto __error;
-		}
-	}
+	err = check_and_subscribe_port(src_client, src_port, subs, true,
+				       exclusive,
+				       connector->number != src_client->number);
+	if (err < 0)
+		goto error;
+	err = check_and_subscribe_port(dest_client, dest_port, subs, false,
+				       exclusive,
+				       connector->number != dest_client->number);
+	if (err < 0)
+		goto error_dest;
 
-	if ((err = subscribe_port(src_client, src_port, src, info,
-				  connector->number != src_client->number)) < 0)
-		goto __error;
-	src_called = 1;
-
-	if ((err = subscribe_port(dest_client, dest_port, dest, info,
-				  connector->number != dest_client->number)) < 0)
-		goto __error;
-
-	/* add to list */
-	write_lock_irqsave(&src->list_lock, flags);
-	// write_lock(&dest->list_lock); // no other lock yet
-	list_add_tail(&subs->src_list, &src->list_head);
-	list_add_tail(&subs->dest_list, &dest->list_head);
-	// write_unlock(&dest->list_lock); // no other lock yet
-	write_unlock_irqrestore(&src->list_lock, flags);
-
-	src->exclusive = dest->exclusive = exclusive;
-
-	up_write(&dest->list_mutex);
-	up_write(&src->list_mutex);
 	return 0;
 
- __error:
-	if (src_called)
-		unsubscribe_port(src_client, src_port, src, info,
-				 connector->number != src_client->number);
+ error_dest:
+	delete_and_unsubscribe_port(src_client, src_port, subs, true,
+				    connector->number != src_client->number);
+ error:
 	kfree(subs);
-	up_write(&dest->list_mutex);
-	up_write(&src->list_mutex);
 	return err;
 }
 
-
 /* remove the connection */
 int snd_seq_port_disconnect(struct snd_seq_client *connector,
 			    struct snd_seq_client *src_client,
@@ -567,37 +606,28 @@
 			    struct snd_seq_port_subscribe *info)
 {
 	struct snd_seq_port_subs_info *src = &src_port->c_src;
-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
 	struct snd_seq_subscribers *subs;
 	int err = -ENOENT;
-	unsigned long flags;
 
 	down_write(&src->list_mutex);
-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
 	/* look for the connection */
 	list_for_each_entry(subs, &src->list_head, src_list) {
 		if (match_subs_info(info, &subs->info)) {
-			write_lock_irqsave(&src->list_lock, flags);
-			// write_lock(&dest->list_lock);  // no lock yet
-			list_del(&subs->src_list);
-			list_del(&subs->dest_list);
-			// write_unlock(&dest->list_lock);
-			write_unlock_irqrestore(&src->list_lock, flags);
-			src->exclusive = dest->exclusive = 0;
-			unsubscribe_port(src_client, src_port, src, info,
-					 connector->number != src_client->number);
-			unsubscribe_port(dest_client, dest_port, dest, info,
-					 connector->number != dest_client->number);
-			kfree(subs);
+			atomic_dec(&subs->ref_count); /* mark as not ready */
 			err = 0;
 			break;
 		}
 	}
-
-	up_write(&dest->list_mutex);
 	up_write(&src->list_mutex);
-	return err;
+	if (err < 0)
+		return err;
+
+	delete_and_unsubscribe_port(src_client, src_port, subs, true,
+				    connector->number != src_client->number);
+	delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
+				    connector->number != dest_client->number);
+	kfree(subs);
+	return 0;
 }
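
Port connect/disconnect above is rebuilt around check_and_subscribe_port()/delete_and_unsubscribe_port(): the subscriber starts with ref_count 0, each successfully linked side bumps it, and deliver_to_subscribers() (see the seq_clientmgr.c hunk earlier) skips entries until both sides are in place. A small sketch of that two-phase handshake, with illustrative names:

/* Two-phase connect: deliver only once both endpoints are linked. */
#include <stdatomic.h>
#include <stdbool.h>

struct subscription {
	atomic_int ref_count;	/* 0 after allocation, +1 per linked side */
};

static void link_side(struct subscription *s)
{
	/* called once for the source list and once for the dest list */
	atomic_fetch_add(&s->ref_count, 1);
}

static bool subscription_live(struct subscription *s)
{
	/* deliver_to_subscribers() analogue: both ports ready? */
	return atomic_load(&s->ref_count) == 2;
}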
 
 
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 82b220c..2931049 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -90,6 +90,9 @@
 
 void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&tmr->lock, flags);
 	/* setup defaults */
 	tmr->ppq = 96;		/* 96 PPQ */
 	tmr->tempo = 500000;	/* 120 BPM */
@@ -105,21 +108,25 @@
 	tmr->preferred_resolution = seq_default_timer_resolution;
 
 	tmr->skew = tmr->skew_base = SKEW_BASE;
+	spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
+static void seq_timer_reset(struct snd_seq_timer *tmr)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tmr->lock, flags);
-
 	/* reset time & songposition */
 	tmr->cur_time.tv_sec = 0;
 	tmr->cur_time.tv_nsec = 0;
 
 	tmr->tick.cur_tick = 0;
 	tmr->tick.fraction = 0;
+}
 
+void snd_seq_timer_reset(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	seq_timer_reset(tmr);
 	spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
@@ -138,8 +145,11 @@
 	tmr = q->timer;
 	if (tmr == NULL)
 		return;
-	if (!tmr->running)
+	spin_lock_irqsave(&tmr->lock, flags);
+	if (!tmr->running) {
+		spin_unlock_irqrestore(&tmr->lock, flags);
 		return;
+	}
 
 	resolution *= ticks;
 	if (tmr->skew != tmr->skew_base) {
@@ -148,8 +158,6 @@
 			(((resolution & 0xffff) * tmr->skew) >> 16);
 	}
 
-	spin_lock_irqsave(&tmr->lock, flags);
-
 	/* update timer */
 	snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
 
@@ -296,26 +304,30 @@
 	t->callback = snd_seq_timer_interrupt;
 	t->callback_data = q;
 	t->flags |= SNDRV_TIMER_IFLG_AUTO;
+	spin_lock_irq(&tmr->lock);
 	tmr->timeri = t;
+	spin_unlock_irq(&tmr->lock);
 	return 0;
 }
 
 int snd_seq_timer_close(struct snd_seq_queue *q)
 {
 	struct snd_seq_timer *tmr;
+	struct snd_timer_instance *t;
 	
 	tmr = q->timer;
 	if (snd_BUG_ON(!tmr))
 		return -EINVAL;
-	if (tmr->timeri) {
-		snd_timer_stop(tmr->timeri);
-		snd_timer_close(tmr->timeri);
-		tmr->timeri = NULL;
-	}
+	spin_lock_irq(&tmr->lock);
+	t = tmr->timeri;
+	tmr->timeri = NULL;
+	spin_unlock_irq(&tmr->lock);
+	if (t)
+		snd_timer_close(t);
 	return 0;
 }
 
-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+static int seq_timer_stop(struct snd_seq_timer *tmr)
 {
 	if (! tmr->timeri)
 		return -EINVAL;
@@ -326,6 +338,17 @@
 	return 0;
 }
 
+int snd_seq_timer_stop(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	err = seq_timer_stop(tmr);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+	return err;
+}
+
 static int initialize_timer(struct snd_seq_timer *tmr)
 {
 	struct snd_timer *t;
@@ -358,13 +381,13 @@
 	return 0;
 }
 
-int snd_seq_timer_start(struct snd_seq_timer * tmr)
+static int seq_timer_start(struct snd_seq_timer *tmr)
 {
 	if (! tmr->timeri)
 		return -EINVAL;
 	if (tmr->running)
-		snd_seq_timer_stop(tmr);
-	snd_seq_timer_reset(tmr);
+		seq_timer_stop(tmr);
+	seq_timer_reset(tmr);
 	if (initialize_timer(tmr) < 0)
 		return -EINVAL;
 	snd_timer_start(tmr->timeri, tmr->ticks);
@@ -373,14 +396,25 @@
 	return 0;
 }
 
-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+int snd_seq_timer_start(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	err = seq_timer_start(tmr);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+	return err;
+}
+
+static int seq_timer_continue(struct snd_seq_timer *tmr)
 {
 	if (! tmr->timeri)
 		return -EINVAL;
 	if (tmr->running)
 		return -EBUSY;
 	if (! tmr->initialized) {
-		snd_seq_timer_reset(tmr);
+		seq_timer_reset(tmr);
 		if (initialize_timer(tmr) < 0)
 			return -EINVAL;
 	}
@@ -390,11 +424,24 @@
 	return 0;
 }
 
+int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	err = seq_timer_continue(tmr);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+	return err;
+}
+
 /* return current 'real' time. use timeofday() to get better granularity. */
 snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
 {
 	snd_seq_real_time_t cur_time;
+	unsigned long flags;
 
+	spin_lock_irqsave(&tmr->lock, flags);
 	cur_time = tmr->cur_time;
 	if (tmr->running) { 
 		struct timeval tm;
@@ -410,7 +457,7 @@
 		}
 		snd_seq_sanity_real_time(&cur_time);
 	}
-                
+	spin_unlock_irqrestore(&tmr->lock, flags);
 	return cur_time;	
 }
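
seq_timer.c now splits each operation into an unlocked helper (seq_timer_stop(), seq_timer_start(), seq_timer_continue()) for callers that already hold tmr->lock, plus a public wrapper that takes the lock itself. A compact user-space sketch of that split, names assumed:

/* Unlocked helper + locked public wrapper. */
#include <pthread.h>

struct seq_timer {
	pthread_mutex_t lock;
	int running;
};

static int do_timer_stop(struct seq_timer *tmr)	/* caller holds the lock */
{
	if (!tmr->running)
		return -1;
	tmr->running = 0;
	return 0;
}

int timer_stop(struct seq_timer *tmr)			/* public entry point */
{
	int err;

	pthread_mutex_lock(&tmr->lock);
	err = do_timer_stop(tmr);
	pthread_mutex_unlock(&tmr->lock);
	return err;
}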
 
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 3da2d48..c82ed3e 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -155,21 +155,26 @@
 	struct snd_virmidi *vmidi = substream->runtime->private_data;
 	int count, res;
 	unsigned char buf[32], *pbuf;
+	unsigned long flags;
 
 	if (up) {
 		vmidi->trigger = 1;
 		if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
 		    !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
-			snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
-			return;		/* ignored */
+			while (snd_rawmidi_transmit(substream, buf,
+						    sizeof(buf)) > 0) {
+				/* ignored */
+			}
+			return;
 		}
 		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
 			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
 				return;
 			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
 		}
+		spin_lock_irqsave(&substream->runtime->lock, flags);
 		while (1) {
-			count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+			count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
 			if (count <= 0)
 				break;
 			pbuf = buf;
@@ -179,16 +184,18 @@
 					snd_midi_event_reset_encode(vmidi->parser);
 					continue;
 				}
-				snd_rawmidi_transmit_ack(substream, res);
+				__snd_rawmidi_transmit_ack(substream, res);
 				pbuf += res;
 				count -= res;
 				if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
 					if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
-						return;
+						goto out;
 					vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
 				}
 			}
 		}
+	out:
+		spin_unlock_irqrestore(&substream->runtime->lock, flags);
 	} else {
 		vmidi->trigger = 0;
 	}
@@ -254,9 +261,13 @@
  */
 static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
 {
+	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
 	struct snd_virmidi *vmidi = substream->runtime->private_data;
-	snd_midi_event_free(vmidi->parser);
+
+	write_lock_irq(&rdev->filelist_lock);
 	list_del(&vmidi->list);
+	write_unlock_irq(&rdev->filelist_lock);
+	snd_midi_event_free(vmidi->parser);
 	substream->runtime->private_data = NULL;
 	kfree(vmidi);
 	return 0;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index af1f68f..dca817f 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -422,7 +422,7 @@
 	spin_lock_irqsave(&timer->lock, flags);
 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
 		if (ts->ccallback)
-			ts->ccallback(ti, event + 100, &tstamp, resolution);
+			ts->ccallback(ts, event + 100, &tstamp, resolution);
 	spin_unlock_irqrestore(&timer->lock, flags);
 }
 
@@ -451,6 +451,10 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&slave_active_lock, flags);
+	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+		spin_unlock_irqrestore(&slave_active_lock, flags);
+		return -EBUSY;
+	}
 	timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
 	if (timeri->master && timeri->timer) {
 		spin_lock(&timeri->timer->lock);
@@ -475,7 +479,8 @@
 		return -EINVAL;
 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
 		result = snd_timer_start_slave(timeri);
-		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+		if (result >= 0)
+			snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
 		return result;
 	}
 	timer = timeri->timer;
@@ -484,11 +489,18 @@
 	if (timer->card && timer->card->shutdown)
 		return -ENODEV;
 	spin_lock_irqsave(&timer->lock, flags);
+	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+			     SNDRV_TIMER_IFLG_START)) {
+		result = -EBUSY;
+		goto unlock;
+	}
 	timeri->ticks = timeri->cticks = ticks;
 	timeri->pticks = 0;
 	result = snd_timer_start1(timer, timeri, ticks);
+ unlock:
 	spin_unlock_irqrestore(&timer->lock, flags);
-	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+	if (result >= 0)
+		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
 	return result;
 }
 
@@ -502,9 +514,17 @@
 
 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
 		spin_lock_irqsave(&slave_active_lock, flags);
+		if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+			spin_unlock_irqrestore(&slave_active_lock, flags);
+			return -EBUSY;
+		}
+		if (timeri->timer)
+			spin_lock(&timeri->timer->lock);
 		timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
 		list_del_init(&timeri->ack_list);
 		list_del_init(&timeri->active_list);
+		if (timeri->timer)
+			spin_unlock(&timeri->timer->lock);
 		spin_unlock_irqrestore(&slave_active_lock, flags);
 		goto __end;
 	}
@@ -512,6 +532,11 @@
 	if (!timer)
 		return -EINVAL;
 	spin_lock_irqsave(&timer->lock, flags);
+	if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+			       SNDRV_TIMER_IFLG_START))) {
+		spin_unlock_irqrestore(&timer->lock, flags);
+		return -EBUSY;
+	}
 	list_del_init(&timeri->ack_list);
 	list_del_init(&timeri->active_list);
 	if (timer->card && timer->card->shutdown) {
@@ -581,10 +606,15 @@
 	if (timer->card && timer->card->shutdown)
 		return -ENODEV;
 	spin_lock_irqsave(&timer->lock, flags);
+	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+		result = -EBUSY;
+		goto unlock;
+	}
 	if (!timeri->cticks)
 		timeri->cticks = 1;
 	timeri->pticks = 0;
 	result = snd_timer_start1(timer, timeri, timer->sticks);
+ unlock:
 	spin_unlock_irqrestore(&timer->lock, flags);
 	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
 	return result;
@@ -718,8 +748,8 @@
 			ti->cticks = ti->ticks;
 		} else {
 			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
-			if (--timer->running)
-				list_del_init(&ti->active_list);
+			--timer->running;
+			list_del_init(&ti->active_list);
 		}
 		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
 		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
@@ -1032,11 +1062,21 @@
 	return 0;
 }
 
+static int snd_timer_s_close(struct snd_timer *timer)
+{
+	struct snd_timer_system_private *priv;
+
+	priv = (struct snd_timer_system_private *)timer->private_data;
+	del_timer_sync(&priv->tlist);
+	return 0;
+}
+
 static struct snd_timer_hardware snd_timer_system =
 {
 	.flags =	SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
 	.resolution =	1000000000L / HZ,
 	.ticks =	10000000L,
+	.close =	snd_timer_s_close,
 	.start =	snd_timer_s_start,
 	.stop =		snd_timer_s_stop
 };
@@ -1893,6 +1933,7 @@
 {
 	struct snd_timer_user *tu;
 	long result = 0, unit;
+	int qhead;
 	int err = 0;
 
 	tu = file->private_data;
@@ -1904,7 +1945,7 @@
 
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
 				err = -EAGAIN;
-				break;
+				goto _error;
 			}
 
 			set_current_state(TASK_INTERRUPTIBLE);
@@ -1919,42 +1960,37 @@
 
 			if (tu->disconnected) {
 				err = -ENODEV;
-				break;
+				goto _error;
 			}
 			if (signal_pending(current)) {
 				err = -ERESTARTSYS;
-				break;
+				goto _error;
 			}
 		}
 
+		qhead = tu->qhead++;
+		tu->qhead %= tu->queue_size;
 		spin_unlock_irq(&tu->qlock);
-		if (err < 0)
-			goto _error;
 
 		if (tu->tread) {
-			if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
-					 sizeof(struct snd_timer_tread))) {
+			if (copy_to_user(buffer, &tu->tqueue[qhead],
+					 sizeof(struct snd_timer_tread)))
 				err = -EFAULT;
-				goto _error;
-			}
 		} else {
-			if (copy_to_user(buffer, &tu->queue[tu->qhead++],
-					 sizeof(struct snd_timer_read))) {
+			if (copy_to_user(buffer, &tu->queue[qhead],
+					 sizeof(struct snd_timer_read)))
 				err = -EFAULT;
-				goto _error;
-			}
 		}
 
-		tu->qhead %= tu->queue_size;
-
-		result += unit;
-		buffer += unit;
-
 		spin_lock_irq(&tu->qlock);
 		tu->qused--;
+		if (err < 0)
+			goto _error;
+		result += unit;
+		buffer += unit;
 	}
-	spin_unlock_irq(&tu->qlock);
  _error:
+	spin_unlock_irq(&tu->qlock);
 	return result > 0 ? result : err;
 }
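
The timer read rework above claims the queue index (qhead) while qlock is held, releases the lock for copy_to_user(), then reacquires it to decrement qused, so concurrent readers never copy the same slot. A user-space analogue of that claim-then-copy loop, with invented names:

/* Claim the slot index under the lock, copy it out unlocked. */
#include <pthread.h>
#include <string.h>

struct event { int data; };

struct event_queue {
	pthread_mutex_t lock;
	struct event *slots;
	int qhead, qused, qsize;
};

static int queue_read_one(struct event_queue *q, struct event *dst)
{
	int idx;

	pthread_mutex_lock(&q->lock);
	if (!q->qused) {
		pthread_mutex_unlock(&q->lock);
		return -1;
	}
	idx = q->qhead;				/* claim this slot */
	q->qhead = (q->qhead + 1) % q->qsize;
	pthread_mutex_unlock(&q->lock);

	memcpy(dst, &q->slots[idx], sizeof(*dst));	/* copy unlocked */

	pthread_mutex_lock(&q->lock);
	q->qused--;
	pthread_mutex_unlock(&q->lock);
	return 0;
}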
 
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index bde3330..c0f8f61 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -87,7 +87,7 @@
 module_param(fake_buffer, bool, 0444);
 MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
 #ifdef CONFIG_HIGH_RES_TIMERS
-module_param(hrtimer, bool, 0444);
+module_param(hrtimer, bool, 0644);
 MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
 #endif
 
@@ -109,6 +109,9 @@
 	snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
 };
 
+#define get_dummy_ops(substream) \
+	(*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
+
 struct dummy_model {
 	const char *name;
 	int (*playback_constraints)(struct snd_pcm_runtime *runtime);
@@ -137,7 +140,6 @@
 	int iobox;
 	struct snd_kcontrol *cd_volume_ctl;
 	struct snd_kcontrol *cd_switch_ctl;
-	const struct dummy_timer_ops *timer_ops;
 };
 
 /*
@@ -231,6 +233,8 @@
  */
 
 struct dummy_systimer_pcm {
+	/* ops must be the first item */
+	const struct dummy_timer_ops *timer_ops;
 	spinlock_t lock;
 	struct timer_list timer;
 	unsigned long base_time;
@@ -366,6 +370,8 @@
  */
 
 struct dummy_hrtimer_pcm {
+	/* ops must be the first item */
+	const struct dummy_timer_ops *timer_ops;
 	ktime_t base_time;
 	ktime_t period_time;
 	atomic_t running;
@@ -492,31 +498,25 @@
 
 static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
-		return dummy->timer_ops->start(substream);
+		return get_dummy_ops(substream)->start(substream);
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
-		return dummy->timer_ops->stop(substream);
+		return get_dummy_ops(substream)->stop(substream);
 	}
 	return -EINVAL;
 }
 
 static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-	return dummy->timer_ops->prepare(substream);
+	return get_dummy_ops(substream)->prepare(substream);
 }
 
 static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-	return dummy->timer_ops->pointer(substream);
+	return get_dummy_ops(substream)->pointer(substream);
 }
 
 static struct snd_pcm_hardware dummy_pcm_hardware = {
@@ -562,17 +562,19 @@
 	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
 	struct dummy_model *model = dummy->model;
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	const struct dummy_timer_ops *ops;
 	int err;
 
-	dummy->timer_ops = &dummy_systimer_ops;
+	ops = &dummy_systimer_ops;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	if (hrtimer)
-		dummy->timer_ops = &dummy_hrtimer_ops;
+		ops = &dummy_hrtimer_ops;
 #endif
 
-	err = dummy->timer_ops->create(substream);
+	err = ops->create(substream);
 	if (err < 0)
 		return err;
+	get_dummy_ops(substream) = ops;
 
 	runtime->hw = dummy->pcm_hw;
 	if (substream->pcm->device & 1) {
@@ -594,7 +596,7 @@
 			err = model->capture_constraints(substream->runtime);
 	}
 	if (err < 0) {
-		dummy->timer_ops->free(substream);
+		get_dummy_ops(substream)->free(substream);
 		return err;
 	}
 	return 0;
@@ -602,8 +604,7 @@
 
 static int dummy_pcm_close(struct snd_pcm_substream *substream)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-	dummy->timer_ops->free(substream);
+	get_dummy_ops(substream)->free(substream);
 	return 0;
 }
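
get_dummy_ops() above works because both per-substream structs (dummy_systimer_pcm and dummy_hrtimer_pcm) now keep the ops pointer as their first member, so a single cast of runtime->private_data recovers it without knowing which backend allocated it. A standalone sketch of that first-member trick, with illustrative names:

/* The ops pointer sits at offset 0 of every backend's private struct,
 * so one cast of the opaque pointer recovers it. */
#include <stdio.h>

struct timer_ops {
	const char *name;
};

struct systimer_pcm {
	const struct timer_ops *ops;	/* must be the first member */
	int base_time;
};

struct hrtimer_pcm {
	const struct timer_ops *ops;	/* must be the first member */
	long period_ns;
};

#define get_ops(private_data) (*(const struct timer_ops **)(private_data))

int main(void)
{
	static const struct timer_ops sys_ops = { .name = "system timer" };
	struct systimer_pcm pcm = { .ops = &sys_ops };
	void *private_data = &pcm;	/* what runtime->private_data holds */

	printf("%s\n", get_ops(private_data)->name);	/* "system timer" */
	return 0;
}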
 
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index b02a5e8c..0ac92ab 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -63,7 +63,7 @@
 #define BYTE_PER_SAMPLE (4)
 #define MAGIC_DOT_BYTE (2)
 #define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE)
-static const u8 dot_scrt(const u8 idx, const unsigned int off)
+static u8 dot_scrt(const u8 idx, const unsigned int off)
 {
 	/*
 	 * the length of the added pattern only depends on the lower nibble
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
index 904ce03..040a96d 100644
--- a/sound/firewire/tascam/tascam-transaction.c
+++ b/sound/firewire/tascam/tascam-transaction.c
@@ -230,6 +230,7 @@
 	return err;
 error:
 	fw_core_remove_address_handler(&tscm->async_handler);
+	tscm->async_handler.callback_data = NULL;
 	return err;
 }
 
@@ -276,6 +277,9 @@
 	__be32 reg;
 	unsigned int i;
 
+	if (tscm->async_handler.callback_data == NULL)
+		return;
+
 	/* Turn off FireWire LED. */
 	reg = cpu_to_be32(0x0000008e);
 	snd_fw_transaction(tscm->unit, TCODE_WRITE_QUADLET_REQUEST,
@@ -297,6 +301,8 @@
 			   &reg, sizeof(reg), 0);
 
 	fw_core_remove_address_handler(&tscm->async_handler);
+	tscm->async_handler.callback_data = NULL;
+
 	for (i = 0; i < TSCM_MIDI_OUT_PORT_MAX; i++)
 		snd_fw_async_midi_port_destroy(&tscm->out_ports[i]);
 }
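
The tascam change above clears async_handler.callback_data after unregistering and bails out early when it is already NULL, so the teardown path is safe to run on a device whose registration failed or was already undone. A minimal sketch of that idempotent-unregister idea, names assumed:

/* Clear the handle on teardown; return early if it is already gone. */
#include <stddef.h>

struct transaction_ctx {
	void *callback_data;	/* non-NULL only while registered */
};

static void unregister_transaction(struct transaction_ctx *ctx)
{
	if (ctx->callback_data == NULL)
		return;		/* never registered, or already torn down */

	/* ... remove the address handler here ... */
	ctx->callback_data = NULL;
}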
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index ee0bc18..e281c33 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -21,7 +21,6 @@
 		.pcm_playback_analog_channels = 8,
 		.midi_capture_ports = 4,
 		.midi_playback_ports = 4,
-		.is_controller = true,
 	},
 	{
 		.name = "FW-1082",
@@ -31,9 +30,16 @@
 		.pcm_playback_analog_channels = 2,
 		.midi_capture_ports = 2,
 		.midi_playback_ports = 2,
-		.is_controller = true,
 	},
-	/* FW-1804 may be supported. */
+	{
+		.name = "FW-1804",
+		.has_adat = true,
+		.has_spdif = true,
+		.pcm_capture_analog_channels = 8,
+		.pcm_playback_analog_channels = 2,
+		.midi_capture_ports = 2,
+		.midi_playback_ports = 4,
+	},
 };
 
 static int identify_model(struct snd_tscm *tscm)
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 2d028d2..30ab77e 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -39,7 +39,6 @@
 	unsigned int pcm_playback_analog_channels;
 	unsigned int midi_capture_ports;
 	unsigned int midi_playback_ports;
-	bool is_controller;
 };
 
 #define TSCM_MIDI_IN_PORT_MAX	4
@@ -72,9 +71,6 @@
 	struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX];
 	u8 running_status[TSCM_MIDI_OUT_PORT_MAX];
 	bool on_sysex[TSCM_MIDI_OUT_PORT_MAX];
-
-	/* For control messages. */
-	struct snd_firewire_tascam_status *status;
 };
 
 #define TSCM_ADDR_BASE			0xffff00000000ull
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 28e2f8b..8914534 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1141,6 +1141,14 @@
 		emu->emu1010.firmware_thread =
 			kthread_create(emu1010_firmware_thread, emu,
 				       "emu1010_firmware");
+		if (IS_ERR(emu->emu1010.firmware_thread)) {
+			err = PTR_ERR(emu->emu1010.firmware_thread);
+			emu->emu1010.firmware_thread = NULL;
+			dev_info(emu->card->dev,
+					"emu1010: Creating thread failed\n");
+			return err;
+		}
+
 		wake_up_process(emu->emu1010.firmware_thread);
 	}
 
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 30c8efe..7ca5b89 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4028,9 +4028,9 @@
 			       struct hda_jack_callback *jack,
 			       bool on)
 {
-	if (jack && jack->tbl->nid)
+	if (jack && jack->nid)
 		sync_power_state_change(codec,
-					set_pin_power_jack(codec, jack->tbl->nid, on));
+					set_pin_power_jack(codec, jack->nid, on));
 }
 
 /* callback only doing power up -- called at first */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4045dca..ce6b97f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2168,10 +2168,10 @@
 	struct hda_intel *hda;
 
 	if (card) {
-		/* flush the pending probing work */
+		/* cancel the pending probing work */
 		chip = card->private_data;
 		hda = container_of(chip, struct hda_intel, chip);
-		flush_work(&hda->probe_work);
+		cancel_work_sync(&hda->probe_work);
 
 		snd_card_free(card);
 	}
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index c945e25..a33234e 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -259,7 +259,7 @@
 		if (!callback)
 			return ERR_PTR(-ENOMEM);
 		callback->func = func;
-		callback->tbl = jack;
+		callback->nid = jack->nid;
 		callback->next = jack->callback;
 		jack->callback = callback;
 	}
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 858708a..e9814c0 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -21,7 +21,7 @@
 typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
 
 struct hda_jack_callback {
-	struct hda_jack_tbl *tbl;
+	hda_nid_t nid;
 	hda_jack_callback_fn func;
 	unsigned int private_data;	/* arbitrary data */
 	struct hda_jack_callback *next;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 4ef2259..9ceb2bc 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4427,13 +4427,16 @@
 static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
 {
 	struct ca0132_spec *spec = codec->spec;
+	struct hda_jack_tbl *tbl;
 
 	/* Delay enabling the HP amp, to let the mic-detection
 	 * state machine run.
 	 */
 	cancel_delayed_work_sync(&spec->unsol_hp_work);
 	schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
-	cb->tbl->block_report = 1;
+	tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+	if (tbl)
+		tbl->block_report = 1;
 }
 
 static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a12ae8a..c1c855a 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -614,6 +614,7 @@
 	CS4208_MAC_AUTO,
 	CS4208_MBA6,
 	CS4208_MBP11,
+	CS4208_MACMINI,
 	CS4208_GPIO0,
 };
 
@@ -621,6 +622,7 @@
 	{ .id = CS4208_GPIO0, .name = "gpio0" },
 	{ .id = CS4208_MBA6, .name = "mba6" },
 	{ .id = CS4208_MBP11, .name = "mbp11" },
+	{ .id = CS4208_MACMINI, .name = "macmini" },
 	{}
 };
 
@@ -632,6 +634,7 @@
 /* codec SSID matching */
 static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+	SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
 	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
 	SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
@@ -666,6 +669,24 @@
 	snd_hda_apply_fixup(codec, action);
 }
 
+/* MacMini 7,1 has the inverted jack detection */
+static void cs4208_fixup_macmini(struct hda_codec *codec,
+				 const struct hda_fixup *fix, int action)
+{
+	static const struct hda_pintbl pincfgs[] = {
+		{ 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
+		{ 0x21, 0x004be140 }, /* SPDIF: disable detect */
+		{ }
+	};
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		/* HP pin (0x10) has an inverted detection */
+		codec->inv_jack_detect = 1;
+		/* disable the bogus Mic and SPDIF jack detections */
+		snd_hda_apply_pincfgs(codec, pincfgs);
+	}
+}
+
 static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
 			       struct snd_ctl_elem_value *ucontrol)
 {
@@ -709,6 +730,12 @@
 		.chained = true,
 		.chain_id = CS4208_GPIO0,
 	},
+	[CS4208_MACMINI] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = cs4208_fixup_macmini,
+		.chained = true,
+		.chain_id = CS4208_GPIO0,
+	},
 	[CS4208_GPIO0] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cs4208_fixup_gpio0,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 1f52b55..8ee78db 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -448,7 +448,8 @@
 	eld = &per_pin->sink_eld;
 
 	mutex_lock(&per_pin->lock);
-	if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+	if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
+	    eld->eld_size > ELD_MAX_SIZE) {
 		mutex_unlock(&per_pin->lock);
 		snd_BUG();
 		return -EINVAL;
@@ -1193,7 +1194,7 @@
 static void jack_callback(struct hda_codec *codec,
 			  struct hda_jack_callback *jack)
 {
-	check_presence_and_report(codec, jack->tbl->nid);
+	check_presence_and_report(codec, jack->nid);
 }
 
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3375324..efd4980 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -282,7 +282,7 @@
 	uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
 	if (!uctl)
 		return;
-	val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
+	val = snd_hda_codec_read(codec, jack->nid, 0,
 				 AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
 	val &= HDA_AMP_VOLMASK;
 	uctl->value.integer.value[0] = val;
@@ -327,6 +327,7 @@
 	case 0x10ec0292:
 		alc_update_coef_idx(codec, 0x4, 1<<15, 0);
 		break;
+	case 0x10ec0225:
 	case 0x10ec0233:
 	case 0x10ec0255:
 	case 0x10ec0256:
@@ -900,6 +901,7 @@
 	{ 0x10ec0899, 0x1028, 0, "ALC3861" },
 	{ 0x10ec0298, 0x1028, 0, "ALC3266" },
 	{ 0x10ec0256, 0x1028, 0, "ALC3246" },
+	{ 0x10ec0225, 0x1028, 0, "ALC3253" },
 	{ 0x10ec0670, 0x1025, 0, "ALC669X" },
 	{ 0x10ec0676, 0x1025, 0, "ALC679X" },
 	{ 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -1785,7 +1787,6 @@
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
 	ALC887_FIXUP_BASS_CHMAP,
-	ALC882_FIXUP_DISABLE_AAMIX,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1947,8 +1948,6 @@
 
 static void alc_fixup_bass_chmap(struct hda_codec *codec,
 				 const struct hda_fixup *fix, int action);
-static void alc_fixup_disable_aamix(struct hda_codec *codec,
-				    const struct hda_fixup *fix, int action);
 
 static const struct hda_fixup alc882_fixups[] = {
 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2186,10 +2185,6 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_bass_chmap,
 	},
-	[ALC882_FIXUP_DISABLE_AAMIX] = {
-		.type = HDA_FIXUP_FUNC,
-		.v.func = alc_fixup_disable_aamix,
-	},
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2228,6 +2223,7 @@
 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
 
 	/* All Apple entries are in codec SSIDs */
 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2257,7 +2253,6 @@
 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
-	SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2651,6 +2646,7 @@
 	ALC269_TYPE_ALC298,
 	ALC269_TYPE_ALC255,
 	ALC269_TYPE_ALC256,
+	ALC269_TYPE_ALC225,
 };
 
 /*
@@ -2680,6 +2676,7 @@
 	case ALC269_TYPE_ALC298:
 	case ALC269_TYPE_ALC255:
 	case ALC269_TYPE_ALC256:
+	case ALC269_TYPE_ALC225:
 		ssids = alc269_ssids;
 		break;
 	default:
@@ -3658,6 +3655,16 @@
 		WRITE_COEF(0xb7, 0x802b),
 		{}
 	};
+	static struct coef_fw coef0225[] = {
+		UPDATE_COEF(0x4a, 1<<8, 0),
+		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
+		UPDATE_COEF(0x63, 3<<14, 3<<14),
+		UPDATE_COEF(0x4a, 3<<4, 2<<4),
+		UPDATE_COEF(0x4a, 3<<10, 3<<10),
+		UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+		UPDATE_COEF(0x4a, 3<<10, 0),
+		{}
+	};
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
@@ -3682,6 +3689,9 @@
 	case 0x10ec0668:
 		alc_process_coef_fw(codec, coef0668);
 		break;
+	case 0x10ec0225:
+		alc_process_coef_fw(codec, coef0225);
+		break;
 	}
 	codec_dbg(codec, "Headset jack set to unplugged mode.\n");
 }
@@ -3727,6 +3737,13 @@
 		UPDATE_COEF(0xc3, 0, 1<<12),
 		{}
 	};
+	static struct coef_fw coef0225[] = {
+		UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14),
+		UPDATE_COEF(0x4a, 3<<4, 2<<4),
+		UPDATE_COEF(0x63, 3<<14, 0),
+		{}
+	};
+
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
@@ -3772,6 +3789,12 @@
 		alc_process_coef_fw(codec, coef0688);
 		snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
 		break;
+	case 0x10ec0225:
+		alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
+		snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+		alc_process_coef_fw(codec, coef0225);
+		snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+		break;
 	}
 	codec_dbg(codec, "Headset jack set to mic-in mode.\n");
 }
@@ -3884,6 +3907,13 @@
 		WRITE_COEF(0xc3, 0x0000),
 		{}
 	};
+	static struct coef_fw coef0225[] = {
+		UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
+		UPDATE_COEF(0x49, 1<<8, 1<<8),
+		UPDATE_COEF(0x4a, 7<<6, 7<<6),
+		UPDATE_COEF(0x4a, 3<<4, 3<<4),
+		{}
+	};
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
@@ -3912,6 +3942,9 @@
 	case 0x10ec0668:
 		alc_process_coef_fw(codec, coef0688);
 		break;
+	case 0x10ec0225:
+		alc_process_coef_fw(codec, coef0225);
+		break;
 	}
 	codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
 }
@@ -3955,6 +3988,13 @@
 		WRITE_COEF(0xc3, 0x0000),
 		{}
 	};
+	static struct coef_fw coef0225[] = {
+		UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
+		UPDATE_COEF(0x49, 1<<8, 1<<8),
+		UPDATE_COEF(0x4a, 7<<6, 7<<6),
+		UPDATE_COEF(0x4a, 3<<4, 3<<4),
+		{}
+	};
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
@@ -3983,6 +4023,9 @@
 	case 0x10ec0668:
 		alc_process_coef_fw(codec, coef0688);
 		break;
+	case 0x10ec0225:
+		alc_process_coef_fw(codec, coef0225);
+		break;
 	}
 	codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
 }
@@ -4014,6 +4057,11 @@
 		WRITE_COEF(0xc3, 0x0c00),
 		{}
 	};
+	static struct coef_fw coef0225[] = {
+		UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+		UPDATE_COEF(0x49, 1<<8, 1<<8),
+		{}
+	};
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
@@ -4058,6 +4106,12 @@
 		val = alc_read_coef_idx(codec, 0xbe);
 		is_ctia = (val & 0x1c02) == 0x1c02;
 		break;
+	case 0x10ec0225:
+		alc_process_coef_fw(codec, coef0225);
+		msleep(800);
+		val = alc_read_coef_idx(codec, 0x46);
+		is_ctia = (val & 0x00f0) == 0x00f0;
+		break;
 	}
 
 	codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
@@ -5560,6 +5614,9 @@
 	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
 	{}
 };
+#define ALC225_STANDARD_PINS \
+	{0x12, 0xb7a60130}, \
+	{0x21, 0x04211020}
 
 #define ALC256_STANDARD_PINS \
 	{0x12, 0x90a60140}, \
@@ -5581,6 +5638,12 @@
 	{0x21, 0x03211020}
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC225_STANDARD_PINS,
+		{0x14, 0x901701a0}),
+	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC225_STANDARD_PINS,
+		{0x14, 0x901701b0}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
@@ -5906,6 +5969,9 @@
 		spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
 		break;
+	case 0x10ec0225:
+		spec->codec_variant = ALC269_TYPE_ALC225;
+		break;
 	}
 
 	if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6796,6 +6862,7 @@
  */
 static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 2c7c5eb..37b70f8 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -493,9 +493,9 @@
 	if (!spec->num_pwrs)
 		return;
 
-	if (jack && jack->tbl->nid) {
-		stac_toggle_power_map(codec, jack->tbl->nid,
-				      snd_hda_jack_detect(codec, jack->tbl->nid),
+	if (jack && jack->nid) {
+		stac_toggle_power_map(codec, jack->nid,
+				      snd_hda_jack_detect(codec, jack->nid),
 				      true);
 		return;
 	}
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 3191e0a..d1fb035 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -635,6 +635,7 @@
 					    SNDRV_PCM_HW_PARAM_PERIODS);
 	if (ret < 0) {
 		dev_err(prtd->platform->dev, "set integer constraint failed\n");
+		kfree(adata);
 		return ret;
 	}
 
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 33143fe..9178531 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1929,6 +1929,25 @@
 	{ 1000000, 13500000, 0,  1 },
 };
 
+static const unsigned int pseudo_fref_max[ARIZONA_FLL_MAX_FRATIO] = {
+	13500000,
+	 6144000,
+	 6144000,
+	 3072000,
+	 3072000,
+	 2822400,
+	 2822400,
+	 1536000,
+	 1536000,
+	 1536000,
+	 1536000,
+	 1536000,
+	 1536000,
+	 1536000,
+	 1536000,
+	  768000,
+};
+
 static struct {
 	unsigned int min;
 	unsigned int max;
@@ -2042,16 +2061,32 @@
 	/* Adjust FRATIO/refdiv to avoid integer mode if possible */
 	refdiv = cfg->refdiv;
 
+	arizona_fll_dbg(fll, "pseudo: initial ratio=%u fref=%u refdiv=%u\n",
+			init_ratio, Fref, refdiv);
+
 	while (div <= ARIZONA_FLL_MAX_REFDIV) {
 		for (ratio = init_ratio; ratio <= ARIZONA_FLL_MAX_FRATIO;
 		     ratio++) {
 			if ((ARIZONA_FLL_VCO_CORNER / 2) /
-			    (fll->vco_mult * ratio) < Fref)
+			    (fll->vco_mult * ratio) < Fref) {
+				arizona_fll_dbg(fll, "pseudo: hit VCO corner\n");
 				break;
+			}
+
+			if (Fref > pseudo_fref_max[ratio - 1]) {
+				arizona_fll_dbg(fll,
+					"pseudo: exceeded max fref(%u) for ratio=%u\n",
+					pseudo_fref_max[ratio - 1],
+					ratio);
+				break;
+			}
 
 			if (target % (ratio * Fref)) {
 				cfg->refdiv = refdiv;
 				cfg->fratio = ratio - 1;
+				arizona_fll_dbg(fll,
+					"pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
+					Fref, refdiv, div, ratio);
 				return ratio;
 			}
 		}
@@ -2060,6 +2095,9 @@
 			if (target % (ratio * Fref)) {
 				cfg->refdiv = refdiv;
 				cfg->fratio = ratio - 1;
+				arizona_fll_dbg(fll,
+					"pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
+					Fref, refdiv, div, ratio);
 				return ratio;
 			}
 		}
@@ -2068,6 +2106,9 @@
 		Fref /= 2;
 		refdiv++;
 		init_ratio = arizona_find_fratio(Fref, NULL);
+		arizona_fll_dbg(fll,
+				"pseudo: change fref=%u refdiv=%d(%d) ratio=%u\n",
+				Fref, refdiv, div, init_ratio);
 	}
 
 	arizona_fll_warn(fll, "Falling back to integer mode operation\n");
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index bc08f0c..1bd3164 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -266,6 +266,8 @@
 		} else {
 			*mic = false;
 			regmap_write(rt286->regmap, RT286_SET_MIC1, 0x20);
+			regmap_update_bits(rt286->regmap,
+				RT286_CBJ_CTRL1, 0x0400, 0x0000);
 		}
 	} else {
 		regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf);
@@ -470,24 +472,6 @@
 	return 0;
 }
 
-static int rt286_vref_event(struct snd_soc_dapm_widget *w,
-			     struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits(codec,
-			RT286_CBJ_CTRL1, 0x0400, 0x0000);
-		mdelay(50);
-		break;
-	default:
-		return 0;
-	}
-
-	return 0;
-}
-
 static int rt286_ldo2_event(struct snd_soc_dapm_widget *w,
 			     struct snd_kcontrol *kcontrol, int event)
 {
@@ -536,7 +520,7 @@
 	SND_SOC_DAPM_SUPPLY_S("HV", 1, RT286_POWER_CTRL1,
 		12, 1, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("VREF", RT286_POWER_CTRL1,
-		0, 1, rt286_vref_event, SND_SOC_DAPM_PRE_PMU),
+		0, 1, NULL, 0),
 	SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT286_POWER_CTRL2,
 		2, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY_S("LDO2", 2, RT286_POWER_CTRL1,
@@ -911,8 +895,6 @@
 	case SND_SOC_BIAS_ON:
 		mdelay(10);
 		snd_soc_update_bits(codec,
-			RT286_CBJ_CTRL1, 0x0400, 0x0400);
-		snd_soc_update_bits(codec,
 			RT286_DC_GAIN, 0x200, 0x0);
 
 		break;
@@ -920,8 +902,6 @@
 	case SND_SOC_BIAS_STANDBY:
 		snd_soc_write(codec,
 			RT286_SET_AUDIO_POWER, AC_PWRST_D3);
-		snd_soc_update_bits(codec,
-			RT286_CBJ_CTRL1, 0x0400, 0x0000);
 		break;
 
 	default:
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index c61d38b..93e8c90 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -776,7 +776,7 @@
 
 	/* IN1/IN2 Control */
 	SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
-		RT5645_BST_SFT1, 8, 0, bst_tlv),
+		RT5645_BST_SFT1, 12, 0, bst_tlv),
 	SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
 		RT5645_BST_SFT2, 8, 0, bst_tlv),
 
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 820d8fa..fb8ea05 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -3985,7 +3985,6 @@
 	if (rt5659 == NULL)
 		return -ENOMEM;
 
-	rt5659->i2c = i2c;
 	i2c_set_clientdata(i2c, rt5659);
 
 	if (pdata)
@@ -4157,24 +4156,17 @@
 
 	INIT_DELAYED_WORK(&rt5659->jack_detect_work, rt5659_jack_detect_work);
 
-	if (rt5659->i2c->irq) {
-		ret = request_threaded_irq(rt5659->i2c->irq, NULL, rt5659_irq,
-			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+	if (i2c->irq) {
+		ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+			rt5659_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
 			| IRQF_ONESHOT, "rt5659", rt5659);
 		if (ret)
 			dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
 
 	}
 
-	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
+	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
 			rt5659_dai, ARRAY_SIZE(rt5659_dai));
-
-	if (ret) {
-		if (rt5659->i2c->irq)
-			free_irq(rt5659->i2c->irq, rt5659);
-	}
-
-	return 0;
 }
 
 static int rt5659_i2c_remove(struct i2c_client *i2c)
@@ -4191,24 +4183,29 @@
 	regmap_write(rt5659->regmap, RT5659_RESET, 0);
 }
 
+#ifdef CONFIG_OF
 static const struct of_device_id rt5659_of_match[] = {
 	{ .compatible = "realtek,rt5658", },
 	{ .compatible = "realtek,rt5659", },
-	{},
+	{ },
 };
+MODULE_DEVICE_TABLE(of, rt5659_of_match);
+#endif
 
+#ifdef CONFIG_ACPI
 static struct acpi_device_id rt5659_acpi_match[] = {
-		{ "10EC5658", 0},
-		{ "10EC5659", 0},
-		{ },
+	{ "10EC5658", 0, },
+	{ "10EC5659", 0, },
+	{ },
 };
 MODULE_DEVICE_TABLE(acpi, rt5659_acpi_match);
+#endif
 
 struct i2c_driver rt5659_i2c_driver = {
 	.driver = {
 		.name = "rt5659",
 		.owner = THIS_MODULE,
-		.of_match_table = rt5659_of_match,
+		.of_match_table = of_match_ptr(rt5659_of_match),
 		.acpi_match_table = ACPI_PTR(rt5659_acpi_match),
 	},
 	.probe = rt5659_i2c_probe,
diff --git a/sound/soc/codecs/rt5659.h b/sound/soc/codecs/rt5659.h
index 8f07ee9..d31c9e5 100644
--- a/sound/soc/codecs/rt5659.h
+++ b/sound/soc/codecs/rt5659.h
@@ -1792,7 +1792,6 @@
 	struct snd_soc_codec *codec;
 	struct rt5659_platform_data pdata;
 	struct regmap *regmap;
-	struct i2c_client *i2c;
 	struct gpio_desc *gpiod_ldo1_en;
 	struct gpio_desc *gpiod_reset;
 	struct snd_soc_jack *hs_jack;
diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c
index 21ca3a5..d374c18 100644
--- a/sound/soc/codecs/sigmadsp-i2c.c
+++ b/sound/soc/codecs/sigmadsp-i2c.c
@@ -31,7 +31,10 @@
 
 	kfree(buf);
 
-	return ret;
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
 
 static int sigmadsp_read_i2c(void *control_data,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 6088d30..97c0f1e 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2382,6 +2382,7 @@
 
 static int wm5110_remove(struct platform_device *pdev)
 {
+	snd_soc_unregister_platform(&pdev->dev);
 	snd_soc_unregister_codec(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index ff23772..d7f444f 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -240,13 +240,13 @@
 SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
 	7, 1, 1),
 
-SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
-	       WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
-SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
-	       WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
-	       WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
+	       WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
+	       WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
+	       WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
 	       WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume",
 		WM8960_RINPATH, 4, 3, 0, micboost_tlv),
@@ -643,29 +643,31 @@
 		return -EINVAL;
 	}
 
-	/* check if the sysclk frequency is available. */
-	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
-		if (sysclk_divs[i] == -1)
-			continue;
-		sysclk = freq_out / sysclk_divs[i];
-		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
-			if (sysclk == dac_divs[j] * lrclk) {
+	if (wm8960->clk_id != WM8960_SYSCLK_PLL) {
+		/* check if the sysclk frequency is available. */
+		for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+			if (sysclk_divs[i] == -1)
+				continue;
+			sysclk = freq_out / sysclk_divs[i];
+			for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+				if (sysclk != dac_divs[j] * lrclk)
+					continue;
 				for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
 					if (sysclk == bclk * bclk_divs[k] / 10)
 						break;
 				if (k != ARRAY_SIZE(bclk_divs))
 					break;
 			}
+			if (j != ARRAY_SIZE(dac_divs))
+				break;
 		}
-		if (j != ARRAY_SIZE(dac_divs))
-			break;
-	}
 
-	if (i != ARRAY_SIZE(sysclk_divs)) {
-		goto configure_clock;
-	} else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
-		dev_err(codec->dev, "failed to configure clock\n");
-		return -EINVAL;
+		if (i != ARRAY_SIZE(sysclk_divs)) {
+			goto configure_clock;
+		} else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
+			dev_err(codec->dev, "failed to configure clock\n");
+			return -EINVAL;
+		}
 	}
 	/* get a available pll out frequency and set pll */
 	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index ce664c2..bff258d 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -645,6 +645,8 @@
 
 	dev->dev = &pdev->dev;
 
+	dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
+	dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
 	if (pdata) {
 		dev->capability = pdata->cap;
 		clk_id = NULL;
@@ -652,9 +654,6 @@
 		if (dev->quirks & DW_I2S_QUIRK_COMP_REG_OFFSET) {
 			dev->i2s_reg_comp1 = pdata->i2s_reg_comp1;
 			dev->i2s_reg_comp2 = pdata->i2s_reg_comp2;
-		} else {
-			dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
-			dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
 		}
 		ret = dw_configure_dai_by_pd(dev, dw_i2s_dai, res, pdata);
 	} else {
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 40dfd8a..ed8de10 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -112,20 +112,6 @@
 	struct fsl_ssi_reg_val tx;
 };
 
-static const struct reg_default fsl_ssi_reg_defaults[] = {
-	{CCSR_SSI_SCR,     0x00000000},
-	{CCSR_SSI_SIER,    0x00003003},
-	{CCSR_SSI_STCR,    0x00000200},
-	{CCSR_SSI_SRCR,    0x00000200},
-	{CCSR_SSI_STCCR,   0x00040000},
-	{CCSR_SSI_SRCCR,   0x00040000},
-	{CCSR_SSI_SACNT,   0x00000000},
-	{CCSR_SSI_STMSK,   0x00000000},
-	{CCSR_SSI_SRMSK,   0x00000000},
-	{CCSR_SSI_SACCEN,  0x00000000},
-	{CCSR_SSI_SACCDIS, 0x00000000},
-};
-
 static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
@@ -190,8 +176,7 @@
 	.val_bits = 32,
 	.reg_stride = 4,
 	.val_format_endian = REGMAP_ENDIAN_NATIVE,
-	.reg_defaults = fsl_ssi_reg_defaults,
-	.num_reg_defaults = ARRAY_SIZE(fsl_ssi_reg_defaults),
+	.num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
 	.readable_reg = fsl_ssi_readable_reg,
 	.volatile_reg = fsl_ssi_volatile_reg,
 	.precious_reg = fsl_ssi_precious_reg,
@@ -201,6 +186,7 @@
 
 struct fsl_ssi_soc_data {
 	bool imx;
+	bool imx21regs; /* imx21-class SSI - no SACC{ST,EN,DIS} regs */
 	bool offline_config;
 	u32 sisr_write_mask;
 };
@@ -303,6 +289,7 @@
 
 static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
 	.imx = true,
+	.imx21regs = true,
 	.offline_config = true,
 	.sisr_write_mask = 0,
 };
@@ -586,8 +573,12 @@
 	 */
 	regmap_write(regs, CCSR_SSI_SACNT,
 			CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
-	regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
-	regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
+
+	/* no SACC{ST,EN,DIS} regs on imx21-class SSI */
+	if (!ssi_private->soc->imx21regs) {
+		regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
+		regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
+	}
 
 	/*
 	 * Enable SSI, Transmit and Receive. AC97 has to communicate with the
@@ -1397,6 +1388,7 @@
 	struct resource *res;
 	void __iomem *iomem;
 	char name[64];
+	struct regmap_config regconfig = fsl_ssi_regconfig;
 
 	of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
 	if (!of_id || !of_id->data)
@@ -1444,15 +1436,25 @@
 		return PTR_ERR(iomem);
 	ssi_private->ssi_phys = res->start;
 
+	if (ssi_private->soc->imx21regs) {
+		/*
+		 * According to datasheet imx21-class SSI
+		 * don't have SACC{ST,EN,DIS} regs.
+		 */
+		regconfig.max_register = CCSR_SSI_SRMSK;
+		regconfig.num_reg_defaults_raw =
+			CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
+	}
+
 	ret = of_property_match_string(np, "clock-names", "ipg");
 	if (ret < 0) {
 		ssi_private->has_ipg_clk_name = false;
 		ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
-			&fsl_ssi_regconfig);
+			&regconfig);
 	} else {
 		ssi_private->has_ipg_clk_name = true;
 		ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
-			"ipg", iomem, &fsl_ssi_regconfig);
+			"ipg", iomem, &regconfig);
 	}
 	if (IS_ERR(ssi_private->regs)) {
 		dev_err(&pdev->dev, "Failed to init register map\n");
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index a407e83..fb896b2 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -72,8 +72,6 @@
 		goto end;
 	}
 
-	platform_set_drvdata(pdev, data);
-
 end:
 	of_node_put(spdif_np);
 
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 1ded881..2389ab4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -99,7 +99,7 @@
 		if (ret && ret != -ENOTSUPP)
 			goto err;
 	}
-
+	return 0;
 err:
 	return ret;
 }
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 803f95e..7d7c872 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -30,11 +30,15 @@
 config SND_SOC_INTEL_SST
 	tristate
 	select SND_SOC_INTEL_SST_ACPI if ACPI
+	select SND_SOC_INTEL_SST_MATCH if ACPI
 	depends on (X86 || COMPILE_TEST)
 
 config SND_SOC_INTEL_SST_ACPI
 	tristate
 
+config SND_SOC_INTEL_SST_MATCH
+	tristate
+
 config SND_SOC_INTEL_HASWELL
 	tristate
 
@@ -57,7 +61,7 @@
 config SND_SOC_INTEL_BYT_RT5640_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
 	depends on X86_INTEL_LPSS && I2C
-	depends on DW_DMAC_CORE=y && (SND_SOC_INTEL_BYTCR_RT5640_MACH = n)
+	depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
 	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_RT5640
@@ -69,7 +73,7 @@
 config SND_SOC_INTEL_BYT_MAX98090_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
 	depends on X86_INTEL_LPSS && I2C
-	depends on DW_DMAC_CORE=y
+	depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
 	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_MAX98090
@@ -97,6 +101,7 @@
 	select SND_SOC_RT5640
 	select SND_SST_MFLD_PLATFORM
 	select SND_SST_IPC_ACPI
+	select SND_SOC_INTEL_SST_MATCH if ACPI
 	help
           This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
           platforms with RT5640 audio codec.
@@ -109,6 +114,7 @@
 	select SND_SOC_RT5651
 	select SND_SST_MFLD_PLATFORM
 	select SND_SST_IPC_ACPI
+	select SND_SOC_INTEL_SST_MATCH if ACPI
 	help
           This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
           platforms with RT5651 audio codec.
@@ -121,6 +127,7 @@
         select SND_SOC_RT5670
         select SND_SST_MFLD_PLATFORM
         select SND_SST_IPC_ACPI
+	select SND_SOC_INTEL_SST_MATCH if ACPI
         help
           This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
           platforms with RT5672 audio codec.
@@ -133,6 +140,7 @@
 	select SND_SOC_RT5645
 	select SND_SST_MFLD_PLATFORM
 	select SND_SST_IPC_ACPI
+	select SND_SOC_INTEL_SST_MATCH if ACPI
 	help
 	  This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
 	  platforms with RT5645/5650 audio codec.
@@ -145,6 +153,7 @@
 	select SND_SOC_TS3A227E
 	select SND_SST_MFLD_PLATFORM
 	select SND_SST_IPC_ACPI
+	select SND_SOC_INTEL_SST_MATCH if ACPI
 	help
       This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
       platforms with MAX98090 audio codec; it can also support a TI jack chip as an aux device.
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 55c33dc7..52ed434 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -528,6 +528,7 @@
 	.ops = &sst_compr_dai_ops,
 	.playback = {
 		.stream_name = "Compress Playback",
+		.channels_min = 1,
 	},
 },
 /* BE CPU  Dais */
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 7396ddb..2cbcbe4 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -212,7 +212,10 @@
 {
 	struct snd_interval *channels = hw_param_interval(params,
 						SNDRV_PCM_HW_PARAM_CHANNELS);
-	channels->min = channels->max = 4;
+	if (params_channels(params) == 2)
+		channels->min = channels->max = 2;
+	else
+		channels->min = channels->max = 4;
 
 	return 0;
 }
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 668fdee..fbbb25c 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -1,13 +1,10 @@
 snd-soc-sst-dsp-objs := sst-dsp.o
-ifneq ($(CONFIG_SND_SST_IPC_ACPI),)
-snd-soc-sst-acpi-objs := sst-match-acpi.o
-else
-snd-soc-sst-acpi-objs := sst-acpi.o sst-match-acpi.o
-endif
-
+snd-soc-sst-acpi-objs := sst-acpi.o
+snd-soc-sst-match-objs := sst-match-acpi.o
 snd-soc-sst-ipc-objs := sst-ipc.o
 
 snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o
 
 obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
 obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
+obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o
diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
index 7a85c57..2c5eda1 100644
--- a/sound/soc/intel/common/sst-acpi.c
+++ b/sound/soc/intel/common/sst-acpi.c
@@ -215,6 +215,7 @@
 	.dma_size = SST_LPT_DSP_DMA_SIZE,
 };
 
+#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
 static struct sst_acpi_mach baytrail_machines[] = {
 	{ "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
 	{ "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
@@ -231,11 +232,14 @@
 	.sst_id = SST_DEV_ID_BYT,
 	.resindex_dma_base = -1,
 };
+#endif
 
 static const struct acpi_device_id sst_acpi_match[] = {
 	{ "INT33C8", (unsigned long)&sst_acpi_haswell_desc },
 	{ "INT3438", (unsigned long)&sst_acpi_broadwell_desc },
+#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
 	{ "80860F28", (unsigned long)&sst_acpi_baytrail_desc },
+#endif
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, sst_acpi_match);
diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c
index dd077e1..3b4539d 100644
--- a/sound/soc/intel/common/sst-match-acpi.c
+++ b/sound/soc/intel/common/sst-match-acpi.c
@@ -41,3 +41,6 @@
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index de6dac4..4629372 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -688,14 +688,14 @@
 	/* get src queue index */
 	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
 	if (src_index < 0)
-		return -EINVAL;
+		return 0;
 
 	msg.src_queue = src_index;
 
 	/* get dst queue index */
 	dst_index  = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
 	if (dst_index < 0)
-		return -EINVAL;
+		return 0;
 
 	msg.dst_queue = dst_index;
 
@@ -747,7 +747,7 @@
 
 	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
 
-	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
 		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
 		return 0;
 
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index f355325..b6e6b61 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -863,6 +863,7 @@
 		else
 			delay += hstream->bufsize;
 	}
+	delay = (hstream->bufsize == delay) ? 0 : delay;
 
 	if (delay >= hstream->period_bytes) {
 		dev_info(bus->dev,
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 4624556..a294fee 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -54,12 +54,9 @@
 
 /*
  * Each pipeline needs memory to be allocated. Check if we have free memory
- * from available pool. Then only add this to pool
- * This is freed when pipe is deleted
- * Note: DSP does actual memory management we only keep track for complete
- * pool
+ * from available pool.
  */
-static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
+static bool skl_is_pipe_mem_avail(struct skl *skl,
 				struct skl_module_cfg *mconfig)
 {
 	struct skl_sst *ctx = skl->skl_sst;
@@ -74,10 +71,20 @@
 				"exceeds ppl memory available %d mem %d\n",
 				skl->resource.max_mem, skl->resource.mem);
 		return false;
+	} else {
+		return true;
 	}
+}
 
+/*
+ * Add the mem to the mem pool. This is freed when the pipe is deleted.
+ * Note: the DSP does the actual memory management; we only keep track
+ * of the complete pool here.
+ */
+static void skl_tplg_alloc_pipe_mem(struct skl *skl,
+				struct skl_module_cfg *mconfig)
+{
 	skl->resource.mem += mconfig->pipe->memory_pages;
-	return true;
 }
 
 /*
@@ -85,10 +92,10 @@
  * quantified in MCPS (Million Clocks Per Second) required for module/pipe
  *
  * Each pipeline needs mcps to be allocated. Check if we have mcps for this
- * pipe. This adds the mcps to driver counter
- * This is removed on pipeline delete
+ * pipe.
  */
-static bool skl_tplg_alloc_pipe_mcps(struct skl *skl,
+
+static bool skl_is_pipe_mcps_avail(struct skl *skl,
 				struct skl_module_cfg *mconfig)
 {
 	struct skl_sst *ctx = skl->skl_sst;
@@ -98,13 +105,18 @@
 			"%s: module_id %d instance %d\n", __func__,
 			mconfig->id.module_id, mconfig->id.instance_id);
 		dev_err(ctx->dev,
-			"exceeds ppl memory available %d > mem %d\n",
+			"exceeds ppl mcps available %d > mem %d\n",
 			skl->resource.max_mcps, skl->resource.mcps);
 		return false;
+	} else {
+		return true;
 	}
+}
 
+static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
+				struct skl_module_cfg *mconfig)
+{
 	skl->resource.mcps += mconfig->mcps;
-	return true;
 }
 
 /*
@@ -411,7 +423,7 @@
 		mconfig = w->priv;
 
 		/* check resource available */
-		if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+		if (!skl_is_pipe_mcps_avail(skl, mconfig))
 			return -ENOMEM;
 
 		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
@@ -435,6 +447,7 @@
 		ret = skl_tplg_set_module_params(w, ctx);
 		if (ret < 0)
 			return ret;
+		skl_tplg_alloc_pipe_mcps(skl, mconfig);
 	}
 
 	return 0;
@@ -477,10 +490,10 @@
 	struct skl_sst *ctx = skl->skl_sst;
 
 	/* check resource available */
-	if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+	if (!skl_is_pipe_mcps_avail(skl, mconfig))
 		return -EBUSY;
 
-	if (!skl_tplg_alloc_pipe_mem(skl, mconfig))
+	if (!skl_is_pipe_mem_avail(skl, mconfig))
 		return -ENOMEM;
 
 	/*
@@ -526,11 +539,15 @@
 		src_module = dst_module;
 	}
 
+	skl_tplg_alloc_pipe_mem(skl, mconfig);
+	skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
 	return 0;
 }
 
 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
 				struct skl *skl,
+				struct snd_soc_dapm_widget *src_w,
 				struct skl_module_cfg *src_mconfig)
 {
 	struct snd_soc_dapm_path *p;
@@ -547,6 +564,10 @@
 		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
 
 		next_sink = p->sink;
+
+		if (!is_skl_dsp_widget_type(p->sink))
+			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
+
 		/*
 		 * here we will check widgets in sink pipelines, so that
 		 * can be any widgets type and we are only interested if
@@ -576,7 +597,7 @@
 	}
 
 	if (!sink)
-		return skl_tplg_bind_sinks(next_sink, skl, src_mconfig);
+		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
 
 	return 0;
 }
@@ -605,7 +626,7 @@
 	 * if sink is not started, start sink pipe first, then start
 	 * this pipe
 	 */
-	ret = skl_tplg_bind_sinks(w, skl, src_mconfig);
+	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
 	if (ret)
 		return ret;
 
@@ -773,10 +794,7 @@
 			continue;
 		}
 
-		ret = skl_unbind_modules(ctx, src_module, dst_module);
-		if (ret < 0)
-			return ret;
-
+		skl_unbind_modules(ctx, src_module, dst_module);
 		src_module = dst_module;
 	}
 
@@ -814,9 +832,6 @@
 			 * This is a connector and if a path is found that means
 			 * unbind between source and sink has not happened yet
 			 */
-			ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
-			if (ret < 0)
-				return ret;
 			ret = skl_unbind_modules(ctx, src_mconfig,
 							sink_mconfig);
 		}
@@ -842,6 +857,12 @@
 	case SND_SOC_DAPM_PRE_PMU:
 		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
 
+	case SND_SOC_DAPM_POST_PMU:
+		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
+
+	case SND_SOC_DAPM_PRE_PMD:
+		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
+
 	case SND_SOC_DAPM_POST_PMD:
 		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
 	}
@@ -916,6 +937,13 @@
 		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
 				      bc->max, bc->param_id, mconfig);
 
+	/* decrement size for TLV header */
+	size -= 2 * sizeof(u32);
+
+	/* check size as we don't want to send kernel data */
+	if (size > bc->max)
+		size = bc->max;
+
 	if (bc->params) {
 		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
 			return -EFAULT;
@@ -1510,6 +1538,7 @@
 					&skl_tplg_ops, fw, 0);
 	if (ret < 0) {
 		dev_err(bus->dev, "tplg component load failed%d\n", ret);
+		release_firmware(fw);
 		return -EINVAL;
 	}
 
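
A rough standalone sketch of the resource-accounting split introduced in skl-topology above: availability is checked before a pipe is created, and the driver-side counter is only charged once creation has succeeded, so a failed create no longer leaks accounted memory/MCPS. All names here are illustrative, not the skl-topology API:

#include <errno.h>
#include <stdbool.h>

struct example_pool {
	int max;	/* total budget */
	int used;	/* currently accounted */
};

static bool example_is_avail(const struct example_pool *pool, int need)
{
	return pool->used + need <= pool->max;
}

static void example_charge(struct example_pool *pool, int need)
{
	pool->used += need;	/* undone again when the pipe is deleted */
}

static int example_create_pipe(struct example_pool *pool, int need)
{
	if (!example_is_avail(pool, need))
		return -ENOMEM;

	/* ... create the pipe and bind its modules; this may still fail ... */

	example_charge(pool, need);	/* commit only after success */
	return 0;
}
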
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 443a15d..092705e 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -614,8 +614,6 @@
 		goto out_unregister;
 
 	/*configure PM */
-	pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
-	pm_runtime_use_autosuspend(bus->dev);
 	pm_runtime_put_noidle(bus->dev);
 	pm_runtime_allow(bus->dev);
 
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 15c04e2..9769676 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -9,7 +9,7 @@
 
 config SND_SOC_MT8173_MAX98090
 	tristate "ASoC Audio driver for MT8173 with MAX98090 codec"
-	depends on SND_SOC_MEDIATEK
+	depends on SND_SOC_MEDIATEK && I2C
 	select SND_SOC_MAX98090
 	help
 	  This adds ASoC driver for Mediatek MT8173 boards
@@ -19,7 +19,7 @@
 
 config SND_SOC_MT8173_RT5650_RT5676
 	tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs"
-	depends on SND_SOC_MEDIATEK
+	depends on SND_SOC_MEDIATEK && I2C
 	select SND_SOC_RT5645
 	select SND_SOC_RT5677
 	help
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index c866ade..a6c7b8d 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -381,9 +381,19 @@
 	__raw_writel(BM_SAIF_CTRL_CLKGATE,
 		saif->base + SAIF_CTRL + MXS_CLR_ADDR);
 
+	clk_prepare(saif->clk);
+
 	return 0;
 }
 
+static void mxs_saif_shutdown(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *cpu_dai)
+{
+	struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
+
+	clk_unprepare(saif->clk);
+}
+
 /*
  * Should only be called when the port is inactive,
  * although it can be called multiple times by upper layers.
@@ -424,8 +434,6 @@
 		return ret;
 	}
 
-	/* prepare clk in hw_param, enable in trigger */
-	clk_prepare(saif->clk);
 	if (saif != master_saif) {
 		/*
 		* Set an initial clock rate for the saif internal logic to work
@@ -611,6 +619,7 @@
 
 static const struct snd_soc_dai_ops mxs_saif_dai_ops = {
 	.startup = mxs_saif_startup,
+	.shutdown = mxs_saif_shutdown,
 	.trigger = mxs_saif_trigger,
 	.prepare = mxs_saif_prepare,
 	.hw_params = mxs_saif_hw_params,
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 79688aa..4aeb8e1 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -440,18 +440,18 @@
 }
 
 static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream,
-		struct snd_soc_pcm_runtime *soc_runtime)
+		struct snd_soc_pcm_runtime *rt)
 {
 	struct snd_dma_buffer *buf = &substream->dma_buffer;
 	size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
 
 	buf->dev.type = SNDRV_DMA_TYPE_DEV;
-	buf->dev.dev = soc_runtime->dev;
+	buf->dev.dev = rt->platform->dev;
 	buf->private_data = NULL;
-	buf->area = dma_alloc_coherent(soc_runtime->dev, size, &buf->addr,
+	buf->area = dma_alloc_coherent(rt->platform->dev, size, &buf->addr,
 			GFP_KERNEL);
 	if (!buf->area) {
-		dev_err(soc_runtime->dev, "%s: Could not allocate DMA buffer\n",
+		dev_err(rt->platform->dev, "%s: Could not allocate DMA buffer\n",
 				__func__);
 		return -ENOMEM;
 	}
@@ -461,12 +461,12 @@
 }
 
 static void lpass_platform_free_buffer(struct snd_pcm_substream *substream,
-		struct snd_soc_pcm_runtime *soc_runtime)
+		struct snd_soc_pcm_runtime *rt)
 {
 	struct snd_dma_buffer *buf = &substream->dma_buffer;
 
 	if (buf->area) {
-		dma_free_coherent(soc_runtime->dev, buf->bytes, buf->area,
+		dma_free_coherent(rt->dev, buf->bytes, buf->area,
 				buf->addr);
 	}
 	buf->area = NULL;
@@ -499,9 +499,6 @@
 
 	snd_soc_pcm_set_drvdata(soc_runtime, data);
 
-	soc_runtime->dev->coherent_dma_mask = DMA_BIT_MASK(32);
-	soc_runtime->dev->dma_mask = &soc_runtime->dev->coherent_dma_mask;
-
 	ret = lpass_platform_alloc_buffer(substream, soc_runtime);
 	if (ret)
 		return ret;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 5a2812f..0d37079 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -310,7 +310,7 @@
 };
 
 static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
-	struct snd_kcontrol *kcontrol)
+	struct snd_kcontrol *kcontrol, const char *ctrl_name)
 {
 	struct dapm_kcontrol_data *data;
 	struct soc_mixer_control *mc;
@@ -333,7 +333,7 @@
 		if (mc->autodisable) {
 			struct snd_soc_dapm_widget template;
 
-			name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name,
+			name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
 					 "Autodisable");
 			if (!name) {
 				ret = -ENOMEM;
@@ -371,7 +371,7 @@
 		if (e->autodisable) {
 			struct snd_soc_dapm_widget template;
 
-			name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name,
+			name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
 					 "Autodisable");
 			if (!name) {
 				ret = -ENOMEM;
@@ -871,7 +871,7 @@
 
 		kcontrol->private_free = dapm_kcontrol_free;
 
-		ret = dapm_kcontrol_data_alloc(w, kcontrol);
+		ret = dapm_kcontrol_data_alloc(w, kcontrol, name);
 		if (ret) {
 			snd_ctl_free_one(kcontrol);
 			goto exit_free;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e898b42..1af4f23 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1810,7 +1810,8 @@
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
 			continue;
 
 		dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index cc39f63..007cf58 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -2455,7 +2455,6 @@
 	else
 		err = snd_usbmidi_create_endpoints(umidi, endpoints);
 	if (err < 0) {
-		snd_usbmidi_free(umidi);
 		return err;
 	}
 
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a75d9ce..4f6ce1c 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1121,6 +1121,7 @@
 	switch (chip->usb_id) {
 	case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
 	case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
@@ -1281,7 +1282,7 @@
 	case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
 	case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
 	case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
-	case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
+	case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
 		if (fp->altsetting == 2)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
@@ -1290,6 +1291,7 @@
 	case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
 	case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
 	case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
+	case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
 		if (fp->altsetting == 3)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 81a2eb7..05d8158 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2068,6 +2068,15 @@
 		err = -ENOMEM;
 		goto err_free_queues;
 	}
+
+	/*
+	 * Since this thread will not be kept in any rbtree nor in a
+	 * list, initialize its list node so that at thread__put() the
+	 * current thread lifetime assumption is kept and we don't segfault
+	 * at list_del_init().
+	 */
+	INIT_LIST_HEAD(&pt->unknown_thread->node);
+
 	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
 	if (err)
 		goto err_delete_thread;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4f7b0ef..813d9b2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -399,6 +399,9 @@
 {
 	char help[BUFSIZ];
 
+	if (!e)
+		return;
+
 	/*
 	 * We get error directly from syscall errno ( > 0),
 	 * or from encoded pointer's error ( < 0).
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 2be10fb..4ce5c5e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -686,8 +686,9 @@
 		pf->fb_ops = NULL;
 #if _ELFUTILS_PREREQ(0, 142)
 	} else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
-		   pf->cfi != NULL) {
-		if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
+		   (pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) {
+		if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 &&
+		     (dwarf_cfi_addrframe(pf->cfi_dbg, pf->addr, &frame) != 0)) ||
 		    dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
 			pr_warning("Failed to get call frame on 0x%jx\n",
 				   (uintmax_t)pf->addr);
@@ -1015,8 +1016,7 @@
 	return DWARF_CB_OK;
 }
 
-/* Find probe points from debuginfo */
-static int debuginfo__find_probes(struct debuginfo *dbg,
+static int debuginfo__find_probe_location(struct debuginfo *dbg,
 				  struct probe_finder *pf)
 {
 	struct perf_probe_point *pp = &pf->pev->point;
@@ -1025,27 +1025,6 @@
 	Dwarf_Die *diep;
 	int ret = 0;
 
-#if _ELFUTILS_PREREQ(0, 142)
-	Elf *elf;
-	GElf_Ehdr ehdr;
-	GElf_Shdr shdr;
-
-	/* Get the call frame information from this dwarf */
-	elf = dwarf_getelf(dbg->dbg);
-	if (elf == NULL)
-		return -EINVAL;
-
-	if (gelf_getehdr(elf, &ehdr) == NULL)
-		return -EINVAL;
-
-	if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
-	    shdr.sh_type == SHT_PROGBITS) {
-		pf->cfi = dwarf_getcfi_elf(elf);
-	} else {
-		pf->cfi = dwarf_getcfi(dbg->dbg);
-	}
-#endif
-
 	off = 0;
 	pf->lcache = intlist__new(NULL);
 	if (!pf->lcache)
@@ -1108,6 +1087,39 @@
 	return ret;
 }
 
+/* Find probe points from debuginfo */
+static int debuginfo__find_probes(struct debuginfo *dbg,
+				  struct probe_finder *pf)
+{
+	int ret = 0;
+
+#if _ELFUTILS_PREREQ(0, 142)
+	Elf *elf;
+	GElf_Ehdr ehdr;
+	GElf_Shdr shdr;
+
+	if (pf->cfi_eh || pf->cfi_dbg)
+		return debuginfo__find_probe_location(dbg, pf);
+
+	/* Get the call frame information from this dwarf */
+	elf = dwarf_getelf(dbg->dbg);
+	if (elf == NULL)
+		return -EINVAL;
+
+	if (gelf_getehdr(elf, &ehdr) == NULL)
+		return -EINVAL;
+
+	if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+	    shdr.sh_type == SHT_PROGBITS)
+		pf->cfi_eh = dwarf_getcfi_elf(elf);
+
+	pf->cfi_dbg = dwarf_getcfi(dbg->dbg);
+#endif
+
+	ret = debuginfo__find_probe_location(dbg, pf);
+	return ret;
+}
+
 struct local_vars_finder {
 	struct probe_finder *pf;
 	struct perf_probe_arg *args;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index bed8271..0aec770 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -76,7 +76,10 @@
 
 	/* For variable searching */
 #if _ELFUTILS_PREREQ(0, 142)
-	Dwarf_CFI		*cfi;		/* Call Frame Information */
+	/* Call Frame Information from .eh_frame */
+	Dwarf_CFI		*cfi_eh;
+	/* Call Frame Information from .debug_frame */
+	Dwarf_CFI		*cfi_dbg;
 #endif
 	Dwarf_Op		*fb_ops;	/* Frame base attribute */
 	struct perf_probe_arg	*pvar;		/* Current target variable */
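
To show how the cfi_eh/cfi_dbg pair above is intended to be consumed, here is a small sketch (a hypothetical helper, assuming elfutils libdw): the .eh_frame CFI is tried first and the .debug_frame CFI is the fallback, mirroring the probe-finder change:

#include <elfutils/libdw.h>

/* Sketch only: look up the frame covering addr, preferring .eh_frame
 * CFI and falling back to .debug_frame CFI.
 */
static int example_addrframe(Dwarf_CFI *cfi_eh, Dwarf_CFI *cfi_dbg,
			     Dwarf_Addr addr, Dwarf_Frame **frame)
{
	if (cfi_eh && dwarf_cfi_addrframe(cfi_eh, addr, frame) == 0)
		return 0;
	if (cfi_dbg && dwarf_cfi_addrframe(cfi_dbg, addr, frame) == 0)
		return 0;
	return -1;
}
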
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2b58edc..afb0c45 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -311,6 +311,16 @@
 
 	aggr->val = aggr->ena = aggr->run = 0;
 
+	/*
+	 * We calculate the counter's data every interval,
+	 * and the display code shows the ps->res_stats avg
+	 * value. We need to zero the stats for interval
+	 * mode, otherwise the overall running average
+	 * would be shown for each interval instead.
+	 */
+	if (config->interval)
+		init_stats(ps->res_stats);
+
 	if (counter->per_pkg)
 		zero_per_pkg(counter);
 
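
A standalone sketch of why the per-interval re-zeroing above matters (this is not the perf stats API, only the idea): a running average must be reset at each interval boundary, or every interval would display the average over the whole run so far.

/* Minimal running-average state; init_stats() in perf plays the same
 * resetting role at the start of each interval.
 */
struct running_avg {
	double		mean;
	unsigned long	n;
};

static void running_avg_reset(struct running_avg *s)
{
	s->mean = 0.0;
	s->n = 0;
}

static void running_avg_update(struct running_avg *s, double val)
{
	s->n++;
	s->mean += (val - s->mean) / s->n;
}
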
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index 77edcdc..0572784 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -88,7 +88,11 @@
 		exit 1
 	fi
 
-	rm $file
+	rm $file 2>/dev/null
+	if [ $? -ne 0 ]; then
+		chattr -i $file
+		rm $file
+	fi
 
 	if [ -e $file ]; then
 		echo "$file couldn't be deleted" >&2
@@ -111,6 +115,7 @@
 		exit 1
 	fi
 
+	chattr -i $file
 	printf "$attrs" > $file
 
 	if [ -e $file ]; then
@@ -141,7 +146,11 @@
 			echo "$file could not be created" >&2
 			ret=1
 		else
-			rm $file
+			rm $file 2>/dev/null
+			if [ $? -ne 0 ]; then
+				chattr -i $file
+				rm $file
+			fi
 		fi
 	done
 
@@ -174,7 +183,11 @@
 
 		if [ -e $file ]; then
 			echo "Creating $file should have failed" >&2
-			rm $file
+			rm $file 2>/dev/null
+			if [ $? -ne 0 ]; then
+				chattr -i $file
+				rm $file
+			fi
 			ret=1
 		fi
 	done
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
index 8c07644..4af74f7 100644
--- a/tools/testing/selftests/efivarfs/open-unlink.c
+++ b/tools/testing/selftests/efivarfs/open-unlink.c
@@ -1,10 +1,68 @@
+#include <errno.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <sys/ioctl.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
+#include <linux/fs.h>
+
+static int set_immutable(const char *path, int immutable)
+{
+	unsigned int flags;
+	int fd;
+	int rc;
+	int error;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return fd;
+
+	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+	if (rc < 0) {
+		error = errno;
+		close(fd);
+		errno = error;
+		return rc;
+	}
+
+	if (immutable)
+		flags |= FS_IMMUTABLE_FL;
+	else
+		flags &= ~FS_IMMUTABLE_FL;
+
+	rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
+	error = errno;
+	close(fd);
+	errno = error;
+	return rc;
+}
+
+static int get_immutable(const char *path)
+{
+	unsigned int flags;
+	int fd;
+	int rc;
+	int error;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return fd;
+
+	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+	if (rc < 0) {
+		error = errno;
+		close(fd);
+		errno = error;
+		return rc;
+	}
+	close(fd);
+	if (flags & FS_IMMUTABLE_FL)
+		return 1;
+	return 0;
+}
 
 int main(int argc, char **argv)
 {
@@ -27,7 +85,7 @@
 	buf[4] = 0;
 
 	/* create a test variable */
-	fd = open(path, O_WRONLY | O_CREAT);
+	fd = open(path, O_WRONLY | O_CREAT, 0600);
 	if (fd < 0) {
 		perror("open(O_WRONLY)");
 		return EXIT_FAILURE;
@@ -41,6 +99,18 @@
 
 	close(fd);
 
+	rc = get_immutable(path);
+	if (rc < 0) {
+		perror("ioctl(FS_IOC_GETFLAGS)");
+		return EXIT_FAILURE;
+	} else if (rc) {
+		rc = set_immutable(path, 0);
+		if (rc < 0) {
+			perror("ioctl(FS_IOC_SETFLAGS)");
+			return EXIT_FAILURE;
+		}
+	}
+
 	fd = open(path, O_RDONLY);
 	if (fd < 0) {
 		perror("open");
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 69bca18..ea60646 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -143,7 +143,7 @@
  * Check if there was a change in the timer state (should we raise or lower
  * the line level to the GIC).
  */
-static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
@@ -154,10 +154,12 @@
 	 * until we call this function from kvm_timer_flush_hwstate.
 	 */
 	if (!vgic_initialized(vcpu->kvm))
-	    return;
+		return -ENODEV;
 
 	if (kvm_timer_should_fire(vcpu) != timer->irq.level)
 		kvm_timer_update_irq(vcpu, !timer->irq.level);
+
+	return 0;
 }
 
 /*
@@ -218,7 +220,8 @@
 	bool phys_active;
 	int ret;
 
-	kvm_timer_update_state(vcpu);
+	if (kvm_timer_update_state(vcpu))
+		return;
 
 	/*
 	* If we enter the guest with the virtual input level to the VGIC