Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
 "The itimer removal one is not strictly a fix, but I really wanted to
  avoid a rebase of the urgent ones."

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Revert "clocksource: Load the ACPI PM clocksource asynchronously"
  clockevents: Track broadcast device mode change in tick_broadcast_switch_to_oneshot()
  itimer: Use printk_once instead of WARN_ONCE
  nohz: Fix stale jiffies update in tick_nohz_restart()
  tick: Document TICK_ONESHOT config option
  proc: stats: Use arch_idle_time for idle and iowait times if available
  itimer: Schedule silent NULL pointer fixup in setitimer() for removal
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
index 23a43b8..2a7f9a0 100644
--- a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -55,7 +55,7 @@
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
-		This file determines if the the transaction of the USB TMC
+		This file determines if the transaction of the USB TMC
 		device is to be automatically aborted if there is any error.
 		For more details about this, please see the document,
 		"Universal Serial Bus Test and Measurement Class Specification
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-format b/Documentation/ABI/testing/sysfs-bus-event_source-devices-format
new file mode 100644
index 0000000..079afc7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-format
@@ -0,0 +1,14 @@
+Where:		/sys/bus/event_source/devices/<dev>/format
+Date:		January 2012
+Kernel Version: 3.3
+Contact:	Jiri Olsa <jolsa@redhat.com>
+Description:
+		Attribute group to describe the magic bits that go into
+		perf_event_attr::config[012] for a particular pmu.
+		Each attribute of this group defines the 'hardware' bitmask
+		we want to export, so that userspace can deal with sane
+		name/value pairs.
+
+		Example: 'config1:1,6-10,44'
+		Defines contents of attribute that occupies bits 1,6-10,44 of
+		perf_event_attr::config1.
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi
index 4f9ba3c..dd930c8 100644
--- a/Documentation/ABI/testing/sysfs-firmware-acpi
+++ b/Documentation/ABI/testing/sysfs-firmware-acpi
@@ -1,3 +1,23 @@
+What:		/sys/firmware/acpi/bgrt/
+Date:		January 2012
+Contact:	Matthew Garrett <mjg@redhat.com>
+Description:
+		The BGRT is an ACPI 5.0 feature that allows the OS
+		to obtain a copy of the firmware boot splash and
+		some associated metadata. This is intended to be used
+		by boot splash applications in order to interact with
+		the firmware boot splash in order to avoid jarring
+		transitions.
+
+		image: The image bitmap. Currently a 32-bit BMP.
+		status: 1 if the image is valid, 0 if firmware invalidated it.
+		type: 0 indicates image is in BMP format.
+		version: The version of the BGRT. Currently 1.
+		xoffset: The number of pixels between the left of the screen
+			 and the left edge of the image.
+		yoffset: The number of pixels between the top of the screen
+			 and the top edge of the image.
+
 What:		/sys/firmware/acpi/interrupts/
 Date:		February 2008
 Contact:	Len Brown <lenb@kernel.org>
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 2b90d32..c58b236 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -793,6 +793,35 @@
 work correctly.
 
 
+		Chapter 19:  Inline assembly
+
+In architecture-specific code, you may need to use inline assembly to interface
+with CPU or platform functionality.  Don't hesitate to do so when necessary.
+However, don't use inline assembly gratuitously when C can do the job.  You can
+and should poke hardware from C when possible.
+
+Consider writing simple helper functions that wrap common bits of inline
+assembly, rather than repeatedly writing them with slight variations.  Remember
+that inline assembly can use C parameters.
+
+Large, non-trivial assembly functions should go in .S files, with corresponding
+C prototypes defined in C header files.  The C prototypes for assembly
+functions should use "asmlinkage".
+
+You may need to mark your asm statement as volatile, to prevent GCC from
+removing it if GCC doesn't notice any side effects.  You don't always need to
+do so, though, and doing so unnecessarily can limit optimization.
+
+When writing a single inline assembly statement containing multiple
+instructions, put each instruction on a separate line in a separate quoted
+string, and end each string except the last with \n\t to properly indent the
+next instruction in the assembly output:
+
+	asm ("magic %reg1, #42\n\t"
+	     "more_magic %reg2, %reg3"
+	     : /* outputs */ : /* inputs */ : /* clobbers */);
+
+
 
 		Appendix I: References
 
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index b768cc0..5c72eed 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -31,3 +31,21 @@
 Since it is optional for platforms to implement DMA_ATTR_WEAK_ORDERING,
 those that do not will simply ignore the attribute and exhibit default
 behavior.
+
+DMA_ATTR_WRITE_COMBINE
+----------------------
+
+DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
+buffered to improve performance.
+
+Since it is optional for platforms to implement DMA_ATTR_WRITE_COMBINE,
+those that do not will simply ignore the attribute and exhibit default
+behavior.
+
+DMA_ATTR_NON_CONSISTENT
+-----------------------
+
+DMA_ATTR_NON_CONSISTENT lets the platform choose to return either
+consistent or non-consistent memory as it sees fit.  By using this API,
+you are guaranteeing to the platform that you have all the correct and
+necessary sync points for this memory in the driver.
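A minimal sketch of requesting one of these optional attributes with the
struct dma_attrs helpers (which allocation or mapping call finally consumes
the attrs depends on the platform and kernel version, so that part is left
out; the function name is illustrative):

	#include <linux/dma-attrs.h>

	/* Build an attrs set carrying the optional write-combine hint.
	 * Platforms that do not implement it will simply ignore it. */
	static void example_build_wc_attrs(struct dma_attrs *attrs)
	{
		init_dma_attrs(attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);
	}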
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 9c27e51..7514dbf 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -446,4 +446,21 @@
 !Edrivers/i2c/i2c-core.c
   </chapter>
 
+  <chapter id="hsi">
+     <title>High Speed Synchronous Serial Interface (HSI)</title>
+
+     <para>
+	High Speed Synchronous Serial Interface (HSI) is a
+	serial interface mainly used for connecting application
+	engines (APE) with cellular modem engines (CMT) in cellular
+	handsets.
+
+	HSI provides multiplexing for up to 16 logical channels,
+	low-latency and full duplex communication.
+     </para>
+
+!Iinclude/linux/hsi/hsi.h
+!Edrivers/hsi/hsi.c
+  </chapter>
+
 </book>
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
index e7cc363..e20b6da 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/acpi/apei/einj.txt
@@ -53,6 +53,14 @@
   This file is used to set the second error parameter value. Effect of
   parameter depends on error_type specified.
 
+- notrigger
+  The EINJ mechanism is a two step process. First inject the error, then
+  perform some actions to trigger it. Setting "notrigger" to 1 skips the
+  trigger phase, which *may* allow the user to cause the error in some other
+  context by a simple access to the cpu, memory location, or device that is
+  the target of the error injection. Whether this actually works depends
+  on what operations the BIOS actually includes in the trigger phase.
+
 BIOS versions based in the ACPI 4.0 specification have limited options
 to control where the errors are injected.  Your BIOS may support an
 extension (enabled with the param_extension=1 module parameter, or
diff --git a/Documentation/aoe/aoe.txt b/Documentation/aoe/aoe.txt
index b5aada9..5f5aa16 100644
--- a/Documentation/aoe/aoe.txt
+++ b/Documentation/aoe/aoe.txt
@@ -35,7 +35,7 @@
     sh Documentation/aoe/mkshelf.sh /dev/etherd 0
 
   There is also an autoload script that shows how to edit
-  /etc/modprobe.conf to ensure that the aoe module is loaded when
+  /etc/modprobe.d/aoe.conf to ensure that the aoe module is loaded when
   necessary.
 
 USING DEVICE NODES
diff --git a/Documentation/aoe/autoload.sh b/Documentation/aoe/autoload.sh
index 78dad13..815dff4 100644
--- a/Documentation/aoe/autoload.sh
+++ b/Documentation/aoe/autoload.sh
@@ -1,8 +1,8 @@
 #!/bin/sh
 # set aoe to autoload by installing the
-# aliases in /etc/modprobe.conf
+# aliases in /etc/modprobe.d/
 
-f=/etc/modprobe.conf
+f=/etc/modprobe.d/aoe.conf
 
 if test ! -r $f || test ! -w $f; then
 	echo "cannot configure $f for module autoloading" 1>&2
diff --git a/Documentation/blockdev/floppy.txt b/Documentation/blockdev/floppy.txt
index 6ccab88..470fe4b 100644
--- a/Documentation/blockdev/floppy.txt
+++ b/Documentation/blockdev/floppy.txt
@@ -49,7 +49,7 @@
 
  options floppy omnibook messages
 
-in /etc/modprobe.conf.
+in a configuration file in /etc/modprobe.d/.
 
 
  The floppy driver related options are:
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 5c51ed4..cefd3d8 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -217,7 +217,7 @@
 
 The cpus and mems files in the root (top_cpuset) cpuset are
 read-only.  The cpus file automatically tracks the value of
-cpu_online_map using a CPU hotplug notifier, and the mems file
+cpu_online_mask using a CPU hotplug notifier, and the mems file
 automatically tracks the value of node_states[N_HIGH_MEMORY]--i.e.,
 nodes with memory--using the cpuset_track_online_nodes() hook.
 
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 4c95c00..9b1067a 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -34,8 +34,7 @@
 
 Features:
  - accounting anonymous pages, file caches, swap caches usage and limiting them.
- - private LRU and reclaim routine. (system's global LRU and private LRU
-   work independently from each other)
+ - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
  - optionally, memory+swap usage can be accounted and limited.
  - hierarchical accounting
  - soft limit
@@ -154,7 +153,7 @@
 2.2.1 Accounting details
 
 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-Some pages which are never reclaimable and will not be on the global LRU
+Some pages which are never reclaimable and will not be on the LRU
 are not accounted. We just account pages under usual VM management.
 
 RSS pages are accounted at page_fault unless they've already been accounted
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a20bfd4..66ef8f3 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -47,7 +47,7 @@
              other cpus later online, read FAQ's for more info.
 
 additional_cpus=n (*)	Use this to limit hotpluggable cpus. This option sets
-  			cpu_possible_map = cpu_present_map + additional_cpus
+  			cpu_possible_mask = cpu_present_mask + additional_cpus
 
 cede_offline={"off","on"}  Use this option to disable/enable putting offlined
 		            processors to an extended H_CEDE state on
@@ -64,11 +64,11 @@
 on the apicid values in those tables for disabled apics. In the event
 BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could
 use this parameter "additional_cpus=x" to represent those cpus in the
-cpu_possible_map.
+cpu_possible_mask.
 
 possible_cpus=n		[s390,x86_64] use this to set hotpluggable cpus.
 			This option sets possible_cpus bits in
-			cpu_possible_map. Thus keeping the numbers of bits set
+			cpu_possible_mask. Thus keeping the numbers of bits set
 			constant even if the machine gets rebooted.
 
 CPU maps and such
@@ -76,7 +76,7 @@
 [More on cpumaps and primitive to manipulate, please check
 include/linux/cpumask.h that has more descriptive text.]
 
-cpu_possible_map: Bitmap of possible CPUs that can ever be available in the
+cpu_possible_mask: Bitmap of possible CPUs that can ever be available in the
 system. This is used to allocate some boot time memory for per_cpu variables
 that aren't designed to grow/shrink as CPUs are made available or removed.
 Once set during boot time discovery phase, the map is static, i.e no bits
@@ -84,13 +84,13 @@
 upfront can save some boot time memory. See below for how we use heuristics
 in x86_64 case to keep this under check.
 
-cpu_online_map: Bitmap of all CPUs currently online. Its set in __cpu_up()
+cpu_online_mask: Bitmap of all CPUs currently online. Its set in __cpu_up()
 after a cpu is available for kernel scheduling and ready to receive
 interrupts from devices. Its cleared when a cpu is brought down using
 __cpu_disable(), before which all OS services including interrupts are
 migrated to another target CPU.
 
-cpu_present_map: Bitmap of CPUs currently present in the system. Not all
+cpu_present_mask: Bitmap of CPUs currently present in the system. Not all
 of them may be online. When physical hotplug is processed by the relevant
 subsystem (e.g ACPI) can change and new bit either be added or removed
 from the map depending on the event is hot-add/hot-remove. There are currently
@@ -99,22 +99,22 @@
 
 You really dont need to manipulate any of the system cpu maps. They should
 be read-only for most use. When setting up per-cpu resources almost always use
-cpu_possible_map/for_each_possible_cpu() to iterate.
+cpu_possible_mask/for_each_possible_cpu() to iterate.
 
 Never use anything other than cpumask_t to represent bitmap of CPUs.
 
 	#include <linux/cpumask.h>
 
-	for_each_possible_cpu     - Iterate over cpu_possible_map
-	for_each_online_cpu       - Iterate over cpu_online_map
-	for_each_present_cpu      - Iterate over cpu_present_map
+	for_each_possible_cpu     - Iterate over cpu_possible_mask
+	for_each_online_cpu       - Iterate over cpu_online_mask
+	for_each_present_cpu      - Iterate over cpu_present_mask
 	for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
 
 	#include <linux/cpu.h>
 	get_online_cpus() and put_online_cpus():
 
 The above calls are used to inhibit cpu hotplug operations. While the
-cpu_hotplug.refcount is non zero, the cpu_online_map will not change.
+cpu_hotplug.refcount is non zero, the cpu_online_mask will not change.
 If you merely need to avoid cpus going away, you could also use
 preempt_disable() and preempt_enable() for those sections.
 Just remember the critical section cannot call any
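A minimal sketch of the recommended iterators in kernel code (the function
name and message are illustrative only):

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	/* Walk cpu_online_mask through the documented iterator instead of
	 * touching the mask directly. */
	static void example_report_online_cpus(void)
	{
		int cpu;

		for_each_online_cpu(cpu)
			pr_info("cpu%d is online\n", cpu);
	}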
diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt
index 50d7b16..9d28a34 100644
--- a/Documentation/cpuidle/sysfs.txt
+++ b/Documentation/cpuidle/sysfs.txt
@@ -36,6 +36,7 @@
 /sys/devices/system/cpu/cpu0/cpuidle/state0:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -45,6 +46,7 @@
 /sys/devices/system/cpu/cpu0/cpuidle/state1:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -54,6 +56,7 @@
 /sys/devices/system/cpu/cpu0/cpuidle/state2:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -63,6 +66,7 @@
 /sys/devices/system/cpu/cpu0/cpuidle/state3:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -72,6 +76,7 @@
 
 
 * desc : Small description about the idle state (string)
+* disable : Option to disable this idle state (bool)
 * latency : Latency to exit out of this idle state (in microseconds)
 * name : Name of the idle state (string)
 * power : Power consumed while in this idle state (in milliwatts)
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 5903ecf..a200695 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -27,13 +27,13 @@
 	reg = <0x40000000 0x10000000
 	       0xffffe800 0x200
 	      >;
-	atmel,nand-addr-offset = <21>;
-	atmel,nand-cmd-offset = <22>;
+	atmel,nand-addr-offset = <21>;	/* ale */
+	atmel,nand-cmd-offset = <22>;	/* cle */
 	nand-on-flash-bbt;
 	nand-ecc-mode = "soft";
-	gpios = <&pioC 13 0
-		 &pioC 14 0
-		 0
+	gpios = <&pioC 13 0	/* rdy */
+		 &pioC 14 0 	/* nce */
+		 0		/* cd */
 		>;
 	partition@0 {
 		...
diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
new file mode 100644
index 0000000..e2c663b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
@@ -0,0 +1,33 @@
+* FSMC NAND
+
+Required properties:
+- compatible : "st,spear600-fsmc-nand"
+- reg : Address range of the mtd chip
+- reg-names: Should contain the reg names "fsmc_regs" and "nand_data"
+- st,ale-off : Chip specific offset to ALE
+- st,cle-off : Chip specific offset to CLE
+
+Optional properties:
+- bank-width : Width (in bytes) of the device.  If not present, the width
+  defaults to 1 byte
+- nand-skip-bbtscan: Indicates that the BBT scanning should be skipped
+
+Example:
+
+	fsmc: flash@d1800000 {
+		compatible = "st,spear600-fsmc-nand";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xd1800000 0x1000	/* FSMC Register */
+		       0xd2000000 0x4000>;	/* NAND Base */
+		reg-names = "fsmc_regs", "nand_data";
+		st,ale-off = <0x20000>;
+		st,cle-off = <0x10000>;
+
+		bank-width = <1>;
+		nand-skip-bbtscan;
+
+		partition@0 {
+			...
+		};
+	};
diff --git a/Documentation/devicetree/bindings/mtd/spear_smi.txt b/Documentation/devicetree/bindings/mtd/spear_smi.txt
new file mode 100644
index 0000000..7248aad
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/spear_smi.txt
@@ -0,0 +1,31 @@
+* SPEAr SMI
+
+Required properties:
+- compatible : "st,spear600-smi"
+- reg : Address range of the mtd chip
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+  representing partitions.
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the STMMAC interrupts
+- clock-rate : Functional clock rate of SMI in Hz
+
+Optional properties:
+- st,smi-fast-mode : Flash supports read in fast mode
+
+Example:
+
+	smi: flash@fc000000 {
+		compatible = "st,spear600-smi";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xfc000000 0x1000>;
+		interrupt-parent = <&vic1>;
+		interrupts = <12>;
+		clock-rate = <50000000>;	/* 50MHz */
+
+		flash@f8000000 {
+			st,smi-fast-mode;
+			...
+		};
+	};
diff --git a/Documentation/devicetree/bindings/power_supply/max17042_battery.txt b/Documentation/devicetree/bindings/power_supply/max17042_battery.txt
new file mode 100644
index 0000000..5bc9b68
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/max17042_battery.txt
@@ -0,0 +1,18 @@
+max17042_battery
+~~~~~~~~~~~~~~~~
+
+Required properties :
+ - compatible : "maxim,max17042"
+
+Optional properties :
+ - maxim,rsns-microohm : Resistance of rsns resistor in micro Ohms
+                         (datasheet-recommended value is 10000).
+   Defining this property enables current-sense functionality.
+
+Example:
+
+	battery-charger@36 {
+		compatible = "maxim,max17042";
+		reg = <0x36>;
+		maxim,rsns-microohm = <10000>;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/anatop-regulator.txt b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
new file mode 100644
index 0000000..357758c
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
@@ -0,0 +1,29 @@
+Anatop Voltage regulators
+
+Required properties:
+- compatible: Must be "fsl,anatop-regulator"
+- anatop-reg-offset: Anatop MFD register offset
+- anatop-vol-bit-shift: Bit shift for the register
+- anatop-vol-bit-width: Number of bits used in the register
+- anatop-min-bit-val: Minimum value of this register
+- anatop-min-voltage: Minimum voltage of this regulator
+- anatop-max-voltage: Maximum voltage of this regulator
+
+Any property defined as part of the core regulator
+binding, defined in regulator.txt, can also be used.
+
+Example:
+
+	regulator-vddpu {
+		compatible = "fsl,anatop-regulator";
+		regulator-name = "vddpu";
+		regulator-min-microvolt = <725000>;
+		regulator-max-microvolt = <1300000>;
+		regulator-always-on;
+		anatop-reg-offset = <0x140>;
+		anatop-vol-bit-shift = <9>;
+		anatop-vol-bit-width = <5>;
+		anatop-min-bit-val = <1>;
+		anatop-min-voltage = <725000>;
+		anatop-max-voltage = <1300000>;
+	};
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 0c083c5..b4a898f 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -158,7 +158,6 @@
 logo_*_clut224.c
 logo_*_mono.c
 lxdialog
-mach
 mach-types
 mach-types.h
 machtypes.h
diff --git a/Documentation/fb/intel810.txt b/Documentation/fb/intel810.txt
index be3e783..a8e9f5b 100644
--- a/Documentation/fb/intel810.txt
+++ b/Documentation/fb/intel810.txt
@@ -211,7 +211,7 @@
 	modprobe i810fb vram=2 xres=1024 bpp=8 hsync1=30 hsync2=55 vsync1=50 \
 	         vsync2=85 accel=1 mtrr=1
 
-Or just add the following to /etc/modprobe.conf
+Or just add the following to a configuration file in /etc/modprobe.d/
 
 	options i810fb vram=2 xres=1024 bpp=16 hsync1=30 hsync2=55 vsync1=50 \
 	vsync2=85 accel=1 mtrr=1
diff --git a/Documentation/fb/intelfb.txt b/Documentation/fb/intelfb.txt
index dd9e944..feac4e4 100644
--- a/Documentation/fb/intelfb.txt
+++ b/Documentation/fb/intelfb.txt
@@ -120,7 +120,7 @@
 
 	modprobe intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1
 
-Or just add the following to /etc/modprobe.conf
+Or just add the following to a configuration file in /etc/modprobe.d/
 
 	options intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1
 
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 32fae81..03ca210 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,14 +6,6 @@
 
 ---------------------------
 
-What:	x86 floppy disable_hlt
-When:	2012
-Why:	ancient workaround of dubious utility clutters the
-	code used by everybody else.
-Who:	Len Brown <len.brown@intel.com>
-
----------------------------
-
 What:	CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
 When:	2012
 Why:	This optional sub-feature of APM is of dubious reliability,
@@ -532,6 +524,16 @@
 
 ----------------------------
 
+What:	get_robust_list syscall
+When:	2013
+Why:	There appear to be no production users of the get_robust_list syscall,
+	and it runs the risk of leaking address locations, allowing the bypass
+	of ASLR. It was only ever intended for debugging, so it should be
+	removed.
+Who:	Kees Cook <keescook@chromium.org>
+
+----------------------------
+
 What:	setitimer accepts user NULL pointer (value)
 When:	3.6
 Why:	setitimer is not returning -EFAULT if user pointer is NULL. This
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index e916e3d..0d04920 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -114,7 +114,7 @@
 struct file_system_type {
 	const char *name;
 	int fs_flags;
-        struct dentry (*mount) (struct file_system_type *, int,
+        struct dentry *(*mount) (struct file_system_type *, int,
                        const char *, void *);
         void (*kill_sb) (struct super_block *);
         struct module *owner;
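A minimal sketch matching the corrected ->mount prototype above (the
filesystem name and the fill_super helper are hypothetical):

	#include <linux/fs.h>
	#include <linux/module.h>

	/* Hypothetical superblock setup helper for the example. */
	static int examplefs_fill_super(struct super_block *sb, void *data,
					int silent);

	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
					      int flags, const char *dev_name,
					      void *data)
	{
		return mount_nodev(fs_type, flags, data, examplefs_fill_super);
	}

	static struct file_system_type examplefs_type = {
		.owner   = THIS_MODULE,
		.name    = "examplefs",
		.mount   = examplefs_mount,
		.kill_sb = kill_litter_super,
	};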
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index a10f736..90956b6 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -11,7 +11,7 @@
   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
 * AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
 * AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
-* AMD Family 15h processors: "Bulldozer"
+* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity"
 
   Prefix: 'k10temp'
   Addresses scanned: PCI space
diff --git a/Documentation/i2c/busses/scx200_acb b/Documentation/i2c/busses/scx200_acb
index 7c07883d..ce83c87 100644
--- a/Documentation/i2c/busses/scx200_acb
+++ b/Documentation/i2c/busses/scx200_acb
@@ -28,5 +28,5 @@
 parameter to your boot command line:
   scx200_acb.base=0x810,0x820
 If the scx200_acb driver is built as a module, add the following line to
-the file /etc/modprobe.conf instead:
+a configuration file in /etc/modprobe.d/ instead:
   options scx200_acb base=0x810,0x820
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index e77bebf..7aca987 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -169,7 +169,7 @@
 
 	alias block-major-3 ide-probe
 
-to /etc/modprobe.conf.
+to a configuration file in /etc/modprobe.d/.
 
 When ide.c is used as a module, you can pass command line parameters to the
 driver using the "options=" keyword to insmod, while replacing any ',' with
diff --git a/Documentation/input/input.txt b/Documentation/input/input.txt
index b3d6787..666c06c 100644
--- a/Documentation/input/input.txt
+++ b/Documentation/input/input.txt
@@ -250,8 +250,8 @@
 a USB keyboard works and is correctly connected to the kernel keyboard
 driver. 
 
-  Doing a cat /dev/input/mouse0 (c, 13, 32) will verify that a mouse
-is also emulated, characters should appear if you move it.
+  Doing a "cat /dev/input/mouse0" (c, 13, 32) will verify that a mouse
+is also emulated; characters should appear if you move it.
 
   You can test the joystick emulation with the 'jstest' utility,
 available in the joystick package (see Documentation/input/joystick.txt).
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 3b7488f..e34b531d 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -225,6 +225,7 @@
 'j'	00-3F	linux/joystick.h
 'k'	00-0F	linux/spi/spidev.h	conflict!
 'k'	00-05	video/kyro.h		conflict!
+'k'	10-17	linux/hsi/hsi_char.h	HSI character device
 'l'	00-3F	linux/tcfs_fs.h		transparent cryptographic file system
 					<http://web.archive.org/web/*/http://mikonos.dia.unisa.it/tcfs>
 'l'	40-7F	linux/udf_fs_i.h	in development:
diff --git a/Documentation/isdn/README.gigaset b/Documentation/isdn/README.gigaset
index ef3343e..7534c60 100644
--- a/Documentation/isdn/README.gigaset
+++ b/Documentation/isdn/README.gigaset
@@ -97,8 +97,7 @@
 				   2.5.): 1=on (default), 0=off
 
      Depending on your distribution you may want to create a separate module
-     configuration file /etc/modprobe.d/gigaset for these, or add them to a
-     custom file like /etc/modprobe.conf.local.
+     configuration file like /etc/modprobe.d/gigaset.conf for these.
 
 2.2. Device nodes for user space programs
      ------------------------------------
@@ -212,8 +211,8 @@
 
         options ppp_async flag_time=0
 
-     to an appropriate module configuration file, like /etc/modprobe.d/gigaset
-     or /etc/modprobe.conf.local.
+     to an appropriate module configuration file, like
+     /etc/modprobe.d/gigaset.conf.
 
      Unimodem mode is needed for making some devices [e.g. SX100] work which
      do not support the regular Gigaset command set. If debug output (see
@@ -237,8 +236,8 @@
 	modprobe usb_gigaset startmode=0
      or by adding a line like
 	options usb_gigaset startmode=0
-     to an appropriate module configuration file, like /etc/modprobe.d/gigaset
-     or /etc/modprobe.conf.local.
+     to an appropriate module configuration file, like
+     /etc/modprobe.d/gigaset.conf.
 
 2.6. Call-ID (CID) mode
      ------------------
@@ -310,7 +309,7 @@
 
            options isdn dialtimeout=15
 
-        to /etc/modprobe.d/gigaset, /etc/modprobe.conf.local or a similar file.
+        to /etc/modprobe.d/gigaset.conf or a similar file.
 
      Problem:
         The isdnlog program emits error messages or just doesn't work.
@@ -350,8 +349,7 @@
      The initial value can be set using the debug parameter when loading the
      module "gigaset", e.g. by adding a line
         options gigaset debug=0
-     to your module configuration file, eg. /etc/modprobe.d/gigaset or
-     /etc/modprobe.conf.local.
+     to your module configuration file, e.g. /etc/modprobe.d/gigaset.conf.
 
      Generated debugging information can be found
      - as output of the command
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index c313d71..9d5f2a9 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -28,12 +28,10 @@
 
 	grep "(NEW)" conf.new
 
-to see the new config symbols or you can 'diff' the previous and
-new .config files to see the differences:
+to see the new config symbols or you can use diffconfig to see the
+differences between the previous and new .config files:
 
-	diff .config.old .config | less
-
-(Yes, we need something better here.)
+	scripts/diffconfig .config.old .config | less
 
 ______________________________________________________________________
 Environment variables for '*config'
diff --git a/Documentation/laptops/sonypi.txt b/Documentation/laptops/sonypi.txt
index 4857acf..606bdb9 100644
--- a/Documentation/laptops/sonypi.txt
+++ b/Documentation/laptops/sonypi.txt
@@ -110,7 +110,7 @@
 -----------
 
 In order to automatically load the sonypi module on use, you can put those
-lines in your /etc/modprobe.conf file:
+lines in a configuration file in /etc/modprobe.d/:
 
 	alias char-major-10-250 sonypi
 	options sonypi minor=250
diff --git a/Documentation/mono.txt b/Documentation/mono.txt
index e8e1758..d01ac60 100644
--- a/Documentation/mono.txt
+++ b/Documentation/mono.txt
@@ -38,11 +38,11 @@
         /sbin/modprobe binfmt_misc
 	# Some distributions, like Fedora Core, perform
 	# the following command automatically when the
-	# binfmt_misc module is loaded into the kernel.
+	# binfmt_misc module is loaded into the kernel
+	# or during normal boot up (systemd-based systems).
 	# Thus, it is possible that the following line
-	# is not needed at all. Look at /etc/modprobe.conf
-	# to check whether this is applicable or not.
-        mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
+	# is not needed at all.
+	mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
 fi
 
 # Register support for .NET CLR binaries
diff --git a/Documentation/networking/baycom.txt b/Documentation/networking/baycom.txt
index 4e68849..688f18f 100644
--- a/Documentation/networking/baycom.txt
+++ b/Documentation/networking/baycom.txt
@@ -93,7 +93,7 @@
 modems it should access at which ports. This can be done with the setbaycom
 utility. If you are only using one modem, you can also configure the
 driver from the insmod command line (or by means of an option line in
-/etc/modprobe.conf).
+/etc/modprobe.d/*.conf).
 
 Examples:
   modprobe baycom_ser_fdx mode="ser12*" iobase=0x3f8 irq=4
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 080ad26..bfea8a3 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -173,9 +173,8 @@
 
 	Module options may be given as command line arguments to the
 insmod or modprobe command, but are usually specified in either the
-/etc/modules.conf or /etc/modprobe.conf configuration file, or in a
-distro-specific configuration file (some of which are detailed in the next
-section).
+/etc/modprobe.d/*.conf configuration files, or in a distro-specific
+configuration file (some of which are detailed in the next section).
 
 	Details on bonding support for sysfs is provided in the
 "Configuring Bonding Manually via Sysfs" section, below.
@@ -1021,7 +1020,7 @@
 
 	Because the sysconfig scripts supply the bonding module
 options in the ifcfg-bondX file, it is not necessary to add them to
-the system /etc/modules.conf or /etc/modprobe.conf configuration file.
+the system /etc/modprobe.d/*.conf configuration files.
 
 3.2 Configuration with Initscripts Support
 ------------------------------------------
@@ -1098,15 +1097,13 @@
 	arp_ip_target=+192.168.1.1 arp_ip_target=+192.168.1.2
 
 	is the proper syntax to specify multiple targets.  When specifying
-options via BONDING_OPTS, it is not necessary to edit /etc/modules.conf or
-/etc/modprobe.conf.
+options via BONDING_OPTS, it is not necessary to edit /etc/modprobe.d/*.conf.
 
 	For even older versions of initscripts that do not support
-BONDING_OPTS, it is necessary to edit /etc/modules.conf (or
-/etc/modprobe.conf, depending upon your distro) to load the bonding module
-with your desired options when the bond0 interface is brought up.  The
-following lines in /etc/modules.conf (or modprobe.conf) will load the
-bonding module, and select its options:
+BONDING_OPTS, it is necessary to edit /etc/modprobe.d/*.conf (depending upon
+your distro) to load the bonding module with your desired options when the
+bond0 interface is brought up.  The following lines in /etc/modprobe.d/*.conf
+will load the bonding module, and select its options:
 
 alias bond0 bonding
 options bond0 mode=balance-alb miimon=100
@@ -1152,7 +1149,7 @@
 version 8.
 
 	The general method for these systems is to place the bonding
-module parameters into /etc/modules.conf or /etc/modprobe.conf (as
+module parameters into a config file in /etc/modprobe.d/ (as
 appropriate for the installed distro), then add modprobe and/or
 ifenslave commands to the system's global init script.  The name of
 the global init script differs; for sysconfig, it is
@@ -1228,7 +1225,7 @@
 specify a different name for each instance (the module loading system
 requires that every loaded module, even multiple instances of the same
 module, have a unique name).  This is accomplished by supplying multiple
-sets of bonding options in /etc/modprobe.conf, for example:
+sets of bonding options in /etc/modprobe.d/*.conf, for example:
 
 alias bond0 bonding
 options bond0 -o bond0 mode=balance-rr miimon=100
@@ -1793,8 +1790,8 @@
 	On systems with network configuration scripts that do not
 associate physical devices directly with network interface names (so
 that the same physical device always has the same "ethX" name), it may
-be necessary to add some special logic to either /etc/modules.conf or
-/etc/modprobe.conf (depending upon which is installed on the system).
+be necessary to add some special logic to config files in
+/etc/modprobe.d/.
 
 	For example, given a modules.conf containing the following:
 
@@ -1821,20 +1818,15 @@
 bonding is loaded.  This command is fully documented in the
 modules.conf manual page.
 
-	On systems utilizing modprobe.conf (or modprobe.conf.local),
-an equivalent problem can occur.  In this case, the following can be
-added to modprobe.conf (or modprobe.conf.local, as appropriate), as
-follows (all on one line; it has been split here for clarity):
+	On systems utilizing modprobe an equivalent problem can occur.
+In this case, the following can be added to config files in
+/etc/modprobe.d/ as:
 
-install bonding /sbin/modprobe tg3; /sbin/modprobe e1000;
-	/sbin/modprobe --ignore-install bonding
+softdep bonding pre: tg3 e1000
 
-	This will, when loading the bonding module, rather than
-performing the normal action, instead execute the provided command.
-This command loads the device drivers in the order needed, then calls
-modprobe with --ignore-install to cause the normal action to then take
-place.  Full documentation on this can be found in the modprobe.conf
-and modprobe manual pages.
+	This will load tg3 and e1000 modules before loading the bonding one.
+Full documentation on this can be found in the modprobe.d and modprobe
+manual pages.
 
 8.3. Painfully Slow Or No Failed Link Detection By Miimon
 ---------------------------------------------------------
diff --git a/Documentation/networking/dl2k.txt b/Documentation/networking/dl2k.txt
index 10e8490..cba74f7 100644
--- a/Documentation/networking/dl2k.txt
+++ b/Documentation/networking/dl2k.txt
@@ -45,12 +45,13 @@
 "ifconfig". If tested ok, continue the next step.
 
 4. cp dl2k.ko /lib/modules/`uname -r`/kernel/drivers/net
-5. Add the following line to /etc/modprobe.conf:
+5. Add the following line to /etc/modprobe.d/dl2k.conf:
 	alias eth0 dl2k
-6. Run "netconfig" or "netconf" to create configuration script ifcfg-eth0
+6. Run depmod to update the module indexes.
+7. Run "netconfig" or "netconf" to create configuration script ifcfg-eth0
    located at /etc/sysconfig/network-scripts or create it manually.
    [see - Configuration Script Sample]
-7. Driver will automatically load and configure at next boot time.
+8. Driver will automatically load and configure at next boot time.
 
 Compiling the Driver
 ====================
@@ -154,8 +155,8 @@
   -----------------
   1. Copy dl2k.o to the network modules directory, typically
      /lib/modules/2.x.x-xx/net or /lib/modules/2.x.x/kernel/drivers/net.
-  2. Locate the boot module configuration file, most commonly modprobe.conf
-     or modules.conf (for 2.4) in the /etc directory. Add the following lines:
+  2. Locate the boot module configuration file, most commonly in the
+     /etc/modprobe.d/ directory. Add the following lines:
 
      alias ethx dl2k
      options dl2k <optional parameters>
diff --git a/Documentation/networking/driver.txt b/Documentation/networking/driver.txt
index 03283da..da59e28 100644
--- a/Documentation/networking/driver.txt
+++ b/Documentation/networking/driver.txt
@@ -2,16 +2,16 @@
 
 Transmit path guidelines:
 
-1) The hard_start_xmit method must never return '1' under any
-   normal circumstances.  It is considered a hard error unless
+1) The ndo_start_xmit method must not return NETDEV_TX_BUSY under
+   any normal circumstances.  It is considered a hard error unless
    there is no way your device can tell ahead of time when it's
    transmit function will become busy.
 
    Instead it must maintain the queue properly.  For example,
    for a driver implementing scatter-gather this means:
 
-	static int drv_hard_start_xmit(struct sk_buff *skb,
-		   		       struct net_device *dev)
+	static netdev_tx_t drv_hard_start_xmit(struct sk_buff *skb,
+					       struct net_device *dev)
 	{
 		struct drv *dp = netdev_priv(dev);
 
@@ -23,7 +23,7 @@
 			unlock_tx(dp);
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 			       dev->name);
-			return 1;
+			return NETDEV_TX_BUSY;
 		}
 
 		... queue packet to card ...
@@ -35,6 +35,7 @@
 		...
 		unlock_tx(dp);
 		...
+		return NETDEV_TX_OK;
 	}
 
    And then at the end of your TX reclamation event handling:
@@ -58,15 +59,12 @@
             TX_BUFFS_AVAIL(dp) > 0)
 		netif_wake_queue(dp->dev);
 
-2) Do not forget to update netdev->trans_start to jiffies after
-   each new tx packet is given to the hardware.
-
-3) A hard_start_xmit method must not modify the shared parts of a
+2) An ndo_start_xmit method must not modify the shared parts of a
    cloned SKB.
 
-4) Do not forget that once you return 0 from your hard_start_xmit
-   method, it is your driver's responsibility to free up the SKB
-   and in some finite amount of time.
+3) Do not forget that once you return NETDEV_TX_OK from your
+   ndo_start_xmit method, it is your driver's responsibility to free
+   up the SKB and in some finite amount of time.
 
    For example, this means that it is not allowed for your TX
    mitigation scheme to let TX packets "hang out" in the TX
@@ -74,8 +72,9 @@
    This error can deadlock sockets waiting for send buffer room
    to be freed up.
 
-   If you return 1 from the hard_start_xmit method, you must not keep
-   any reference to that SKB and you must not attempt to free it up.
+   If you return NETDEV_TX_BUSY from the ndo_start_xmit method, you
+   must not keep any reference to that SKB and you must not attempt
+   to free it up.
 
 Probing guidelines:
 
@@ -85,10 +84,10 @@
 
 Close/stop guidelines:
 
-1) After the dev->stop routine has been called, the hardware must
+1) After the ndo_stop routine has been called, the hardware must
    not receive or transmit any data.  All in flight packets must
    be aborted. If necessary, poll or wait for completion of 
    any reset commands.
 
-2) The dev->stop routine will be called by unregister_netdevice
+2) The ndo_stop routine will be called by unregister_netdevice
    if device is still UP.
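A minimal sketch of wiring the ndo_* methods discussed above into
net_device_ops (the drv_* names are illustrative; drv_hard_start_xmit is the
example routine shown earlier in this file):

	#include <linux/netdevice.h>

	static int drv_open(struct net_device *dev);
	static int drv_stop(struct net_device *dev);
	static netdev_tx_t drv_hard_start_xmit(struct sk_buff *skb,
					       struct net_device *dev);

	/* Replaces the old dev->open/dev->stop/dev->hard_start_xmit fields. */
	static const struct net_device_ops drv_netdev_ops = {
		.ndo_open	= drv_open,
		.ndo_stop	= drv_stop,
		.ndo_start_xmit	= drv_hard_start_xmit,
	};

The driver then points dev->netdev_ops at this structure from its probe
routine.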
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index 162f323..fcb6c71c 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -94,8 +94,8 @@
 
   Configuring a network driver to load properly when the system is started is
   distribution dependent. Typically, the configuration process involves adding
-  an alias line to /etc/modules.conf or /etc/modprobe.conf as well as editing
-  other system startup scripts and/or configuration files.  Many popular Linux
+  an alias line to /etc/modprobe.d/*.conf as well as editing other system
+  startup scripts and/or configuration files.  Many popular Linux
   distributions ship with tools to make these changes for you. To learn the
   proper way to configure a network device for your system, refer to your
   distribution documentation.  If during this process you are asked for the
@@ -103,7 +103,7 @@
   PRO/100 Family of Adapters is e100.
 
   As an example, if you install the e100 driver for two PRO/100 adapters
-  (eth0 and eth1), add the following to modules.conf or modprobe.conf:
+  (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/:
 
        alias eth0 e100
        alias eth1 e100
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ad3e80e..bd80ba5 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -604,15 +604,8 @@
 ip_local_port_range - 2 INTEGERS
 	Defines the local port range that is used by TCP and UDP to
 	choose the local port. The first number is the first, the
-	second the last local port number. Default value depends on
-	amount of memory available on the system:
-	> 128Mb 32768-61000
-	< 128Mb 1024-4999 or even less.
-	This number defines number of active connections, which this
-	system can issue simultaneously to systems not supporting
-	TCP extensions (timestamps). With tcp_tw_recycle enabled
-	(i.e. by default) range 1024-4999 is enough to issue up to
-	2000 connections per second to systems supporting timestamps.
+	second the last local port number. The default values are
+	32768 and 61000 respectively.
 
 ip_local_reserved_ports - list of comma separated ranges
 	Specify the ports which are reserved for known third-party
diff --git a/Documentation/networking/ipv6.txt b/Documentation/networking/ipv6.txt
index 9fd7e21..6cd74fa 100644
--- a/Documentation/networking/ipv6.txt
+++ b/Documentation/networking/ipv6.txt
@@ -2,9 +2,9 @@
 Options for the ipv6 module are supplied as parameters at load time.
 
 Module options may be given as command line arguments to the insmod
-or modprobe command, but are usually specified in either the
-/etc/modules.conf or /etc/modprobe.conf configuration file, or in a
-distro-specific configuration file.
+or modprobe command, but are usually specified in either
+/etc/modprobe.d/*.conf configuration files, or in a distro-specific
+configuration file.
 
 The available ipv6 module parameters are listed below.  If a parameter
 is not specified the default value is used.
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index e196f16..d75a1f9 100644
--- a/Documentation/networking/ixgb.txt
+++ b/Documentation/networking/ixgb.txt
@@ -274,9 +274,9 @@
   -------------------------------------------------
   Configuring a network driver to load properly when the system is started is
   distribution dependent. Typically, the configuration process involves adding
-  an alias line to /etc/modprobe.conf as well as editing other system startup
-  scripts and/or configuration files.  Many popular Linux distributions ship
-  with tools to make these changes for you.  To learn the proper way to
+  an alias line to files in /etc/modprobe.d/ as well as editing other system
+  startup scripts and/or configuration files.  Many popular Linux distributions
+  ship with tools to make these changes for you.  To learn the proper way to
   configure a network device for your system, refer to your distribution
   documentation.  If during this process you are asked for the driver or module
   name, the name for the Linux Base Driver for the Intel 10GbE Family of
diff --git a/Documentation/networking/ltpc.txt b/Documentation/networking/ltpc.txt
index fe2a912..0bf3220c 100644
--- a/Documentation/networking/ltpc.txt
+++ b/Documentation/networking/ltpc.txt
@@ -25,7 +25,7 @@
 
 If you load the driver as a module, you can pass the parameters "io=",
 "irq=", and "dma=" on the command line with insmod or modprobe, or add
-them as options in /etc/modprobe.conf:
+them as options in a configuration file in the /etc/modprobe.d/ directory:
 
  alias lt0 ltpc # autoload the module when the interface is configured
  options ltpc io=0x240 irq=9 dma=1
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 8935834..c7ecc70 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -47,26 +47,25 @@
 
 struct net_device synchronization rules
 =======================================
-dev->open:
+ndo_open:
 	Synchronization: rtnl_lock() semaphore.
 	Context: process
 
-dev->stop:
+ndo_stop:
 	Synchronization: rtnl_lock() semaphore.
 	Context: process
-	Note1: netif_running() is guaranteed false
-	Note2: dev->poll() is guaranteed to be stopped
+	Note: netif_running() is guaranteed false
 
-dev->do_ioctl:
+ndo_do_ioctl:
 	Synchronization: rtnl_lock() semaphore.
 	Context: process
 
-dev->get_stats:
+ndo_get_stats:
 	Synchronization: dev_base_lock rwlock.
 	Context: nominally process, but don't sleep inside an rwlock
 
-dev->hard_start_xmit:
-	Synchronization: netif_tx_lock spinlock.
+ndo_start_xmit:
+	Synchronization: __netif_tx_lock spinlock.
 
 	When the driver sets NETIF_F_LLTX in dev->features this will be
 	called without holding netif_tx_lock. In this case the driver
@@ -87,20 +86,20 @@
 	o NETDEV_TX_LOCKED Locking failed, please retry quickly.
 	  Only valid when NETIF_F_LLTX is set.
 
-dev->tx_timeout:
-	Synchronization: netif_tx_lock spinlock.
+ndo_tx_timeout:
+	Synchronization: netif_tx_lock spinlock; all TX queues frozen.
 	Context: BHs disabled
 	Notes: netif_queue_stopped() is guaranteed true
 
-dev->set_rx_mode:
-	Synchronization: netif_tx_lock spinlock.
+ndo_set_rx_mode:
+	Synchronization: netif_addr_lock spinlock.
 	Context: BHs disabled
 
 struct napi_struct synchronization rules
 ========================================
 napi->poll:
 	Synchronization: NAPI_STATE_SCHED bit in napi->state.  Device
-		driver's dev->close method will invoke napi_disable() on
+		driver's ndo_stop method will invoke napi_disable() on
 		all NAPI instances which will do a sleeping poll on the
 		NAPI_STATE_SCHED napi->state bit, waiting for all pending
 		NAPI activity to cease.
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index bd70976..b4038ff 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -67,8 +67,8 @@
 =================
 
 There are several parameters which may be provided to the driver when
-its module is loaded.  These are usually placed in /etc/modprobe.conf
-(/etc/modules.conf in 2.4).  Example:
+its module is loaded.  These are usually placed in /etc/modprobe.d/*.conf
+configuration files.  Example:
 
 options 3c59x debug=3 rx_copybreak=300
 
@@ -425,7 +425,7 @@
       1) Increase the debug level.  Usually this is done via:
 
          a) modprobe driver debug=7
-         b) In /etc/modprobe.conf (or /etc/modules.conf for 2.4):
+         b) In /etc/modprobe.d/driver.conf:
             options driver debug=7
 
       2) Recreate the problem with the higher debug level,
diff --git a/Documentation/parport.txt b/Documentation/parport.txt
index 93a7cee..c208e43 100644
--- a/Documentation/parport.txt
+++ b/Documentation/parport.txt
@@ -36,18 +36,17 @@
 are automatically detected.
 
 
-KMod
-----
+modprobe
+--------
 
-If you use kmod, you will find it useful to edit /etc/modprobe.conf.
-Here is an example of the lines that need to be added:
+If you use modprobe, you will find it useful to add lines such as the
+following to a configuration file in the /etc/modprobe.d/ directory:
 
 	alias parport_lowlevel parport_pc
 	options parport_pc io=0x378,0x278 irq=7,auto
 
-KMod will then automatically load parport_pc (with the options
-"io=0x378,0x278 irq=7,auto") whenever a parallel port device driver
-(such as lp) is loaded.
+modprobe will load parport_pc (with the options "io=0x378,0x278 irq=7,auto")
+whenever a parallel port device driver (such as lp) is loaded.
 
 Note that these are example lines only!  You shouldn't in general need
 to specify any options to parport_pc in order to be able to use a
diff --git a/Documentation/s390/3270.txt b/Documentation/s390/3270.txt
index 7a5c73a..7c715de 100644
--- a/Documentation/s390/3270.txt
+++ b/Documentation/s390/3270.txt
@@ -47,9 +47,9 @@
 one another.  ReIPL as soon as possible after running the configuration
 script and the resulting /tmp/mkdev3270.
 
-If you have chosen to make tub3270 a module, you add a line to
-/etc/modprobe.conf.  If you are working on a VM virtual machine, you
-can use DEF GRAF to define virtual 3270 devices.
+If you have chosen to make tub3270 a module, you add a line to a
+configuration file under /etc/modprobe.d/.  If you are working on a VM
+virtual machine, you can use DEF GRAF to define virtual 3270 devices.
 
 You may generate both 3270 and 3215 console support, or one or the
 other, or neither.  If you generate both, the console type under VM is
@@ -60,7 +60,7 @@
 
 In brief, these are the steps:
 	1. Install the tub3270 patch
-	2. (If a module) add a line to /etc/modprobe.conf
+	2. (If a module) add a line to a file in /etc/modprobe.d/*.conf
 	3. (If VM) define devices with DEF GRAF
 	4. Reboot
 	5. Configure
@@ -84,13 +84,12 @@
 		make modules_install
 
 	2. (Perform this step only if you have configured tub3270 as a
-	module.)  Add a line to /etc/modprobe.conf to automatically
-	load the driver when it's needed.  With this line added,
-	you will see login prompts appear on your 3270s as soon as
-	boot is complete (or with emulated 3270s, as soon as you dial
-	into your vm guest using the command "DIAL <vmguestname>").
-	Since the line-mode major number is 227, the line to add to
-	/etc/modprobe.conf should be:
+	module.)  Add a line to a file in /etc/modprobe.d/ to automatically
+	load the driver when it's needed.  With this line added, you will see
+	login prompts appear on your 3270s as soon as boot is complete (or
+	with emulated 3270s, as soon as you dial into your vm guest using the
+	command "DIAL <vmguestname>").  Since the line-mode major number is
+	227, the line to add should be:
 		alias char-major-227 tub3270
 
 	3. Define graphic devices to your vm guest machine, if you
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index b48ded5..b7dd650 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -94,3 +94,5 @@
 	- info on second generation driver for sym53c8xx based adapters
 tmscsim.txt
 	- info on driver for AM53c974 based adapters
+ufs.txt
+	- info on Universal Flash Storage(UFS) and UFS host controller driver.
diff --git a/Documentation/scsi/aic79xx.txt b/Documentation/scsi/aic79xx.txt
index 64ac709..e2d3273 100644
--- a/Documentation/scsi/aic79xx.txt
+++ b/Documentation/scsi/aic79xx.txt
@@ -215,7 +215,7 @@
                  INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
                  USE THEM WITH CAUTION. 
 
-   Edit the file "modprobe.conf" in the directory /etc and add/edit a
+   Put a .conf file in the /etc/modprobe.d/ directory and add/edit a
    line containing 'options aic79xx aic79xx=[command[,command...]]' where
    'command' is one or more of the following:
    -----------------------------------------------------------------
diff --git a/Documentation/scsi/aic7xxx.txt b/Documentation/scsi/aic7xxx.txt
index 18f8d19..7c5d022 100644
--- a/Documentation/scsi/aic7xxx.txt
+++ b/Documentation/scsi/aic7xxx.txt
@@ -190,7 +190,7 @@
                  INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
                  USE THEM WITH CAUTION. 
 
-   Edit the file "modprobe.conf" in the directory /etc and add/edit a
+   Put a .conf file in the /etc/modprobe.d directory and add/edit a
    line containing 'options aic7xxx aic7xxx=[command[,command...]]' where
    'command' is one or more of the following:
    -----------------------------------------------------------------
diff --git a/Documentation/scsi/osst.txt b/Documentation/scsi/osst.txt
index ad86c6d..00c8ebb2 100644
--- a/Documentation/scsi/osst.txt
+++ b/Documentation/scsi/osst.txt
@@ -66,7 +66,7 @@
 If you want to have the module autoloaded on access to /dev/osst, you may
 add something like
 alias char-major-206 osst
-to your /etc/modprobe.conf (before 2.6: modules.conf).
+to a file under the /etc/modprobe.d/ directory.
 
 You may find it convenient to create a symbolic link 
 ln -s nosst0 /dev/tape
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 691ca29..685bf35 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -390,6 +390,10 @@
 	     MT_ST_SYSV sets the SYSV semantics (mode)
 	     MT_ST_NOWAIT enables immediate mode (i.e., don't wait for
 	        the command to finish) for some commands (e.g., rewind)
+	     MT_ST_NOWAIT_EOF enables immediate filemark mode (i.e. when
+	        writing a filemark, don't wait for it to complete). Please
+		see the BASICS note about MTWEOFI with respect to the
+		possible dangers of writing immediate filemarks.
 	     MT_ST_SILI enables setting the SILI bit in SCSI commands when
 		reading in variable block mode to enhance performance when
 		reading blocks shorter than the byte count; set this only
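A minimal user-space sketch of enabling the new MT_ST_NOWAIT_EOF option
through MTSETDRVBUFFER, assuming a <sys/mtio.h> recent enough to carry the
new constant (the device path is illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>

	int main(void)
	{
		/* MT_ST_SETBOOLEANS turns on the listed option bits without
		 * disturbing the other driver options. */
		struct mtop op = {
			.mt_op    = MTSETDRVBUFFER,
			.mt_count = MT_ST_SETBOOLEANS | MT_ST_NOWAIT_EOF,
		};
		int fd = open("/dev/nst0", O_RDWR);

		if (fd < 0 || ioctl(fd, MTIOCTOP, &op) < 0) {
			perror("MT_ST_NOWAIT_EOF");
			return 1;
		}
		close(fd);
		return 0;
	}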
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
new file mode 100644
index 0000000..41a6164
--- /dev/null
+++ b/Documentation/scsi/ufs.txt
@@ -0,0 +1,133 @@
+                       Universal Flash Storage
+                       =======================
+
+
+Contents
+--------
+
+1. Overview
+2. UFS Architecture Overview
+  2.1 Application Layer
+  2.2 UFS Transport Protocol(UTP) layer
+  2.3 UFS Interconnect(UIC) Layer
+3. UFSHCD Overview
+  3.1 UFS controller initialization
+  3.2 UTP Transfer requests
+  3.3 UFS error handling
+  3.4 SCSI Error handling
+
+
+1. Overview
+-----------
+
+Universal Flash Storage(UFS) is a storage specification for flash devices.
+It is aimed to provide a universal storage interface for both
+embedded and removable flash memory based storage in mobile
+devices such as smart phones and tablet computers. The specification
+is defined by JEDEC Solid State Technology Association. UFS is based
+on MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
+physical layer and MIPI Unipro as the link layer.
+
+The main goals of UFS are to provide,
+ * Optimized performance:
+   For UFS version 1.0 and 1.1 the target performance is as follows,
+   Support for Gear1 is mandatory (rate A: 1248Mbps, rate B: 1457.6Mbps)
+   Support for Gear2 is optional (rate A: 2496Mbps, rate B: 2915.2Mbps)
+   Future version of the standard,
+   Gear3 (rate A: 4992Mbps, rate B: 5830.4Mbps)
+ * Low power consumption
+ * High random IOPs and low latency
+
+
+2. UFS Architecture Overview
+----------------------------
+
+UFS has a layered communication architecture which is based on SCSI
+SAM-5 architectural model.
+
+UFS communication architecture consists of following layers,
+
+2.1 Application Layer
+
+  The Application layer is composed of UFS command set layer(UCS),
+  Task Manager and Device manager. The UFS interface is designed to be
+  protocol agnostic, however SCSI has been selected as a baseline
+  protocol for versions 1.0 and 1.1 of the UFS protocol layer.
+  UFS supports a subset of SCSI commands defined by SPC-4 and SBC-3.
+  * UCS: It handles the SCSI commands supported by the UFS specification.
+  * Task manager: It handles the task management functions defined by the
+     UFS specification, which are meant for command queue control.
+  * Device manager: It handles device level operations and device
+     configuration operations. Device level operations mainly involve
+     device power management operations and commands to Interconnect
+     layers. Device level configurations involve handling of query
+     requests which are used to modify and retrieve configuration
+     information of the device.
+
+2.2 UFS Transport Protocol(UTP) layer
+
+  The UTP layer provides services to
+  the higher layers through Service Access Points. UTP defines 3
+  service access points for the higher layers.
+  * UDM_SAP: Device manager service access point is exposed to device
+    manager for device level operations. These device level operations
+    are done through query requests.
+  * UTP_CMD_SAP: Command service access point is exposed to UFS command
+    set layer(UCS) to transport commands.
+  * UTP_TM_SAP: Task management service access point is exposed to task
+    manager to transport task management functions.
+  UTP transports messages through the UFS protocol information unit (UPIU).
+
+2.3 UFS Interconnect(UIC) Layer
+
+  UIC is the lowest layer of the UFS layered architecture. It handles
+  the connection between the UFS host and the UFS device. UIC consists of
+  MIPI UniPro and MIPI M-PHY. UIC provides 2 service access points
+  to the upper layer:
+  * UIC_SAP: To transport UPIU between UFS host and UFS device.
+  * UIO_SAP: To issue commands to Unipro layers.
+
+
+3. UFSHCD Overview
+------------------
+
+The UFS host controller driver is based on the Linux SCSI framework.
+UFSHCD is a low-level device driver which acts as an interface between
+the SCSI midlayer and PCIe-based UFS host controllers.
+
+The current UFSHCD implementation supports the following functionality:
+
+3.1 UFS controller initialization
+
+  The initialization module brings the UFS host controller to the active
+  state and prepares the controller to transfer commands/responses between
+  UFSHCD and the UFS device.
+
+3.2 UTP Transfer requests
+
+  The transfer request handling module of UFSHCD receives SCSI commands
+  from the SCSI midlayer, forms UPIUs and issues the UPIUs to the UFS host
+  controller. The module also decodes the responses received from the UFS
+  host controller in the form of UPIUs and informs the SCSI midlayer
+  of the status of the command.
+
+3.3 UFS error handling
+
+  The error handling module handles host controller fatal errors,
+  device fatal errors and UIC interconnect layer related errors.
+
+3.4 SCSI Error handling
+
+  This is done through UFSHCD SCSI error handling routines registered
+  with the SCSI midlayer. Examples of some of the error handling commands
+  issued by the SCSI midlayer are abort task, LUN reset and host reset.
+  The UFSHCD routines to perform these tasks are registered with the
+  SCSI midlayer through .eh_abort_handler, .eh_device_reset_handler and
+  .eh_host_reset_handler.
+
+In this version of UFSHCD, query requests and power management
+functionality are not implemented.
+
+The UFS specifications can be found at:
+UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
+UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
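
As a hedged illustration of the SCSI error handling hand-off described in
section 3.4 above, a low level driver registers its recovery routines in its
scsi_host_template. The function and template names below are made up for
illustration and are not the actual UFSHCD symbols:

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical abort handler: cancel the outstanding UTP transfer request. */
static int example_ufs_eh_abort(struct scsi_cmnd *cmd)
{
	return SUCCESS;
}

/* Hypothetical device reset handler: issue a LUN reset task management function. */
static int example_ufs_eh_device_reset(struct scsi_cmnd *cmd)
{
	return SUCCESS;
}

/* Hypothetical host reset handler: reset and re-initialize the host controller. */
static int example_ufs_eh_host_reset(struct scsi_cmnd *cmd)
{
	return SUCCESS;
}

static struct scsi_host_template example_ufs_sht = {
	.module			= THIS_MODULE,
	.name			= "example-ufshcd",
	.eh_abort_handler	= example_ufs_eh_abort,
	.eh_device_reset_handler = example_ufs_eh_device_reset,
	.eh_host_reset_handler	= example_ufs_eh_host_reset,
};

The SCSI midlayer invokes these callbacks during error recovery; the low level
driver never has to drive the recovery state machine itself.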
diff --git a/Documentation/serial/computone.txt b/Documentation/serial/computone.txt
index 39ddcdb..a6a1158 100644
--- a/Documentation/serial/computone.txt
+++ b/Documentation/serial/computone.txt
@@ -49,7 +49,7 @@
 
 	Note the hardware address from the Computone ISA cards installed into
 		the system.  These are required for editing ip2.c or editing
-		/etc/modprobe.conf, or for specification on the modprobe
+		/etc/modprobe.d/*.conf, or for specification on the modprobe
 		command line.
 
 	Note that the /etc/modules.conf should be used for older (pre-2.6)
@@ -66,7 +66,7 @@
 c) Set address on ISA cards then:
    edit /usr/src/linux/drivers/char/ip2.c if needed 
 	or
-   edit /etc/modprobe.conf if needed (module).
+   edit a config file in /etc/modprobe.d/ if needed (module).
 	or both to match this setting.
 d) Run "make modules"
 e) Run "make modules_install"
@@ -153,11 +153,11 @@
 selects polled mode). If no base addresses are specified the defaults in 
 ip2.c are used. If you are autoloading the driver module with kerneld or
 kmod the base addresses and interrupt number must also be set in ip2.c
-and recompile or just insert and options line in /etc/modprobe.conf or both.
+and recompile, or just insert an options line in /etc/modprobe.d/*.conf, or both.
 The options line is equivalent to the command line and takes precedence over
 what is in ip2.c. 
 
-/etc/modprobe.conf sample:
+Sample configuration to put in a /etc/modprobe.d/*.conf file:
 	options ip2 io=1,0x328 irq=1,10
 	alias char-major-71 ip2
 	alias char-major-72 ip2
diff --git a/Documentation/serial/rocket.txt b/Documentation/serial/rocket.txt
index 1d85829..60b0398 100644
--- a/Documentation/serial/rocket.txt
+++ b/Documentation/serial/rocket.txt
@@ -62,7 +62,7 @@
 
 If installed as a module, the module must be loaded.  This can be done
 manually by entering "modprobe rocket".  To have the module loaded automatically
-upon system boot, edit the /etc/modprobe.conf file and add the line
+upon system boot, edit a /etc/modprobe.d/*.conf file and add the line
 "alias char-major-46 rocket".
 
 In order to use the ports, their device names (nodes) must be created with mknod.
diff --git a/Documentation/serial/stallion.txt b/Documentation/serial/stallion.txt
index 5c4902d..5509091 100644
--- a/Documentation/serial/stallion.txt
+++ b/Documentation/serial/stallion.txt
@@ -139,8 +139,8 @@
 
 You will probably want to enter this module load and configuration information
 into your system startup scripts so that the drivers are loaded and configured
-on each system boot. Typically the start up script would be something like
-/etc/modprobe.conf.
+on each system boot. Typically configuration files are put in the
+/etc/modprobe.d/ directory.
 
 
 2.2 STATIC DRIVER CONFIGURATION:
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 6f75ba3..8c16d50 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -2044,7 +2044,7 @@
     Install the necessary firmware files in alsa-firmware package.
     When no hotplug fw loader is available, you need to load the
     firmware via vxloader utility in alsa-tools package.  To invoke
-    vxloader automatically, add the following to /etc/modprobe.conf
+    vxloader automatically, add the following to /etc/modprobe.d/alsa.conf
 
 	install snd-vx222 /sbin/modprobe --first-time -i snd-vx222 && /usr/bin/vxloader
 
@@ -2168,10 +2168,10 @@
 as the same card module.
 
 An example configuration for a single emu10k1 card is like below:
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
 alias snd-card-0 snd-emu10k1
 alias sound-slot-0 snd-emu10k1
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
 
 The available number of auto-loaded sound cards depends on the module
 option "cards_limit" of snd module.  As default it's set to 1.
@@ -2184,7 +2184,7 @@
 
 An example configuration for two sound cards is like below:
 
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
 # ALSA portion
 options snd cards_limit=2
 alias snd-card-0 snd-interwave
@@ -2194,7 +2194,7 @@
 # OSS/Free portion
 alias sound-slot-0 snd-interwave
 alias sound-slot-1 snd-ens1371
------ /etc/modprobe.conf
+----- /etc/modprobe.d/alsa.conf
 
 In this example, the interwave card is always loaded as the first card
 (index 0) and ens1371 as the second (index 1).
diff --git a/Documentation/sound/alsa/Audiophile-Usb.txt b/Documentation/sound/alsa/Audiophile-Usb.txt
index a4c53d8..654dd3b 100644
--- a/Documentation/sound/alsa/Audiophile-Usb.txt
+++ b/Documentation/sound/alsa/Audiophile-Usb.txt
@@ -232,7 +232,7 @@
    # modprobe snd-usb-audio index=1 device_setup=0x09
 
  * Or while configuring the modules options in your modules configuration file
-   - For Fedora distributions, edit the /etc/modprobe.conf file:
+   (typically a .conf file in the /etc/modprobe.d/ directory):
        alias snd-card-1 snd-usb-audio
        options snd-usb-audio index=1 device_setup=0x09
 
@@ -253,7 +253,7 @@
    - first turn off the device
    - de-register the snd-usb-audio module (modprobe -r)
    - change the device_setup parameter by changing the device_setup
-     option in /etc/modprobe.conf 
+     option in /etc/modprobe.d/*.conf
    - turn on the device
  * A workaround for this last issue has been applied to kernel 2.6.23, but it may not
    be enough to ensure the 'stability' of the device initialization.
diff --git a/Documentation/sound/alsa/MIXART.txt b/Documentation/sound/alsa/MIXART.txt
index ef42c44..4ee35b4 100644
--- a/Documentation/sound/alsa/MIXART.txt
+++ b/Documentation/sound/alsa/MIXART.txt
@@ -76,9 +76,9 @@
  when CONFIG_FW_LOADER is set.  The mixartloader is necessary only
  for older versions or when you build the driver into kernel.]
  
-For loading the firmware automatically after the module is loaded, use
-the post-install command.  For example, add the following entry to
-/etc/modprobe.conf for miXart driver:
+For loading the firmware automatically after the module is loaded, use an
+install command.  For example, add the following entry to
+/etc/modprobe.d/mixart.conf for miXart driver:
 
 	install snd-mixart /sbin/modprobe --first-time -i snd-mixart && \
 			   /usr/bin/mixartloader
diff --git a/Documentation/sound/alsa/OSS-Emulation.txt b/Documentation/sound/alsa/OSS-Emulation.txt
index 022aaeb..152ca2a 100644
--- a/Documentation/sound/alsa/OSS-Emulation.txt
+++ b/Documentation/sound/alsa/OSS-Emulation.txt
@@ -19,7 +19,7 @@
 define these aliases by yourself.
 
 Only necessary step for auto-loading of OSS modules is to define the
-card alias in /etc/modprobe.conf, such as
+card alias in /etc/modprobe.d/alsa.conf, such as
 
 	alias sound-slot-0 snd-emu10k1
 
diff --git a/Documentation/sound/oss/AudioExcelDSP16 b/Documentation/sound/oss/AudioExcelDSP16
index e0dc064..ea8549f 100644
--- a/Documentation/sound/oss/AudioExcelDSP16
+++ b/Documentation/sound/oss/AudioExcelDSP16
@@ -41,7 +41,7 @@
 		(0x300, 0x310, 0x320 or 0x330)
 mpu_irq		MPU-401 irq line (5, 7, 9, 10 or 0)
 
-The /etc/modprobe.conf will have lines like this:
+A configuration file in the /etc/modprobe.d/ directory will have lines like this:
 
 options opl3 io=0x388
 options ad1848 io=0x530 irq=11 dma=3
@@ -51,11 +51,11 @@
 ad1848 are the corresponding options for the MSS and OPL3 modules.
 
 Loading MSS and OPL3 needs to pre load the aedsp16 module to set up correctly
-the sound card. Installation dependencies must be written in the modprobe.conf
-file:
+the sound card. Installation dependencies must be written in configuration
+files under the /etc/modprobe.d/ directory:
 
-install ad1848 /sbin/modprobe aedsp16 && /sbin/modprobe -i ad1848
-install opl3 /sbin/modprobe aedsp16 && /sbin/modprobe -i opl3
+softdep ad1848 pre: aedsp16
+softdep opl3 pre: aedsp16
 
 Then you must load the sound modules stack in this order:
 sound -> aedsp16 -> [ ad1848, opl3 ]
diff --git a/Documentation/sound/oss/CMI8330 b/Documentation/sound/oss/CMI8330
index 9c439f1..8a5fd16 100644
--- a/Documentation/sound/oss/CMI8330
+++ b/Documentation/sound/oss/CMI8330
@@ -143,11 +143,10 @@
 
 
 
-Alma Chao <elysian@ethereal.torsion.org> suggests the following /etc/modprobe.conf:
+Alma Chao <elysian@ethereal.torsion.org> suggests the following in
+a /etc/modprobe.d/*.conf file:
 
 alias sound ad1848
 alias synth0 opl3
 options ad1848 io=0x530 irq=7 dma=0 soundpro=1
 options opl3 io=0x388
-
-	
diff --git a/Documentation/sound/oss/Introduction b/Documentation/sound/oss/Introduction
index 75d967f..42da2d8 100644
--- a/Documentation/sound/oss/Introduction
+++ b/Documentation/sound/oss/Introduction
@@ -167,8 +167,8 @@
 MODPROBE:
 =========
 
-If loading via modprobe, these common files are automatically loaded 
-when requested by modprobe.  For example, my /etc/modprobe.conf contains:
+If loading via modprobe, these common files are automatically loaded when
+requested by modprobe.  For example, my /etc/modprobe.d/oss.conf contains:
 
 alias sound sb 
 options sb io=0x240 irq=9 dma=3 dma16=5 mpu_io=0x300
@@ -228,7 +228,7 @@
 driver, you should do the following:
 
 1.  remove sound modules (detailed above)
-2.  remove the sound modules from /etc/modprobe.conf
+2.  remove the sound modules from /etc/modprobe.d/*.conf
 3.  move the sound modules from /lib/modules/<kernel>/misc
     (for example, I make a /lib/modules/<kernel>/misc/tmp
     directory and copy the sound module files to that 
@@ -265,7 +265,7 @@
     sb.o could be copied (or symlinked) to sb1.o for the
     second SoundBlaster.
 
-2.  Make a second entry in /etc/modprobe.conf, for example,
+2.  Make a second entry in /etc/modprobe.d/*.conf, for example,
     sound1 or sb1.  This second entry should refer to the
     new module names for example sb1, and should include
     the I/O, etc. for the second sound card.
@@ -369,7 +369,7 @@
 2)  On the command line when using insmod or in a bash script
     using command line calls to load sound.
 
-3)  In /etc/modprobe.conf when using modprobe.
+3)  In /etc/modprobe.d/*.conf when using modprobe.
 
 4)  Via Red Hat's GPL'd /usr/sbin/sndconfig program (text based).
 
diff --git a/Documentation/sound/oss/Opti b/Documentation/sound/oss/Opti
index c15af3c..4cd5d9a 100644
--- a/Documentation/sound/oss/Opti
+++ b/Documentation/sound/oss/Opti
@@ -18,7 +18,7 @@
 If you have another OS installed on your computer it is recommended
 that Linux and the other OS use the same resources.
 
-Also, it is recommended that resources specified in /etc/modprobe.conf
+Also, it is recommended that resources specified in /etc/modprobe.d/*.conf
 and resources specified in /etc/isapnp.conf agree.
 
 Compiling the sound driver
@@ -67,11 +67,7 @@
 
 Using kmod and autoloading the sound driver
 -------------------------------------------
-Comment: as of linux-2.1.90 kmod is replacing kerneld.
-The config file '/etc/modprobe.conf' is used as before.
-
-This is the sound part of my /etc/modprobe.conf file.
-Following that I will explain each line.
+Config files in '/etc/modprobe.d/' are used as shown below:
 
 alias mixer0 mad16
 alias audio0 mad16
diff --git a/Documentation/sound/oss/PAS16 b/Documentation/sound/oss/PAS16
index 3dca4b7..5c27229 100644
--- a/Documentation/sound/oss/PAS16
+++ b/Documentation/sound/oss/PAS16
@@ -128,7 +128,7 @@
   You can then get OPL3 functionality by issuing the command:
   insmod opl3
   In addition, you must either add the following line to 
-  /etc/modprobe.conf:
+  /etc/modprobe.d/*.conf:
   options opl3 io=0x388
   or else add the following line to /etc/lilo.conf:
   opl3=0x388
@@ -158,5 +158,5 @@
 append="pas2=0x388,10,3,-1,0,-1,-1,-1 opl3=0x388"
 
 If sound is built totally modular, the above options may be 
-specified in /etc/modprobe.conf for pas2, sb and opl3
+specified in /etc/modprobe.d/*.conf for pas2, sb and opl3
 respectively. 
diff --git a/Documentation/sound/oss/README.modules b/Documentation/sound/oss/README.modules
index e691d74..cdc0394 100644
--- a/Documentation/sound/oss/README.modules
+++ b/Documentation/sound/oss/README.modules
@@ -26,7 +26,7 @@
 drivers/sound dir. Now one simply configures and makes one's kernel and
 modules in the usual way.
 
- Then, add to your /etc/modprobe.conf something like:
+ Then, add to your /etc/modprobe.d/oss.conf something like:
 
 alias char-major-14-* sb
 install sb /sbin/modprobe -i sb && /sbin/modprobe adlib_card
@@ -36,7 +36,7 @@
  Alternatively, if you have compiled in kernel level ISAPnP support:
 
 alias char-major-14 sb
-post-install sb /sbin/modprobe "-k" "adlib_card"
+softdep sb post: adlib_card
 options adlib_card io=0x388
 
   The effect of this is that the sound driver and all necessary bits and
@@ -66,12 +66,12 @@
  Note that at present there is no way to configure the io, irq and other
 parameters for the modular drivers as one does for the wired drivers.. One
 needs to pass the modules the necessary parameters as arguments, either
-with /etc/modprobe.conf or with command-line args to modprobe, e.g.
+with /etc/modprobe.d/*.conf or with command-line args to modprobe, e.g.
 
 modprobe sb io=0x220 irq=7 dma=1 dma16=5 mpu_io=0x330
 modprobe adlib_card io=0x388
 
- recommend using /etc/modprobe.conf.
+We recommend using /etc/modprobe.d/*.conf.
 
 Persistent DMA Buffers:
 
@@ -89,7 +89,7 @@
 
 To make the sound driver use persistent DMA buffers we need to pass the
 sound.o module a "dmabuf=1" command-line argument. This is normally done
-in /etc/modprobe.conf like so:
+in /etc/modprobe.d/*.conf files like so:
 
 options sound		dmabuf=1
 
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 312e375..642f844 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -241,9 +241,8 @@
 
 *  I have more questions, who can I ask?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-And I'll answer any questions about the registration system you got, also
-responding as soon as possible.
- -Crutcher
+Just ask them on the linux-kernel mailing list:
+	linux-kernel@vger.kernel.org
 
 *  Credits
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 817df29..4204eb0 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -179,7 +179,8 @@
 
 	modprobe usbcore autosuspend=5
 
-Equivalently, you could add to /etc/modprobe.conf a line saying:
+Equivalently, you could add to a configuration file in /etc/modprobe.d
+a line saying:
 
 	options usbcore autosuspend=5
 
diff --git a/Documentation/video4linux/CQcam.txt b/Documentation/video4linux/CQcam.txt
index 8977e7c..6e680fe 100644
--- a/Documentation/video4linux/CQcam.txt
+++ b/Documentation/video4linux/CQcam.txt
@@ -61,29 +61,19 @@
 2.2 Configuration
 
   The configuration requires module configuration and device
-configuration.  I like kmod or kerneld process with the
-/etc/modprobe.conf file so the modules can automatically load/unload as
-they are used.  The video devices could already exist, be generated
-using MAKEDEV, or need to be created.  The following sections detail
-these procedures.
+configuration.  The following sections detail these procedures.
 
 
 2.1 Module Configuration
 
   Using modules requires a bit of work to install and pass the
-parameters.  Understand that entries in /etc/modprobe.conf of:
+parameters.  Understand that entries in /etc/modprobe.d/*.conf of:
 
    alias parport_lowlevel parport_pc
    options parport_pc io=0x378 irq=none
    alias char-major-81 videodev
    alias char-major-81-0 c-qcam
 
-will cause the kmod/modprobe to do certain things.  If you are
-using kmod, then a request for a 'char-major-81-0' will cause
-the 'c-qcam' module to load.  If you have other video sources with
-modules, you might want to assign the different minor numbers to
-different modules.
-
 2.2 Device Configuration
 
   At this point, we need to ensure that the device files exist.
diff --git a/Documentation/video4linux/Zoran b/Documentation/video4linux/Zoran
index 9ed629d..b5a911f 100644
--- a/Documentation/video4linux/Zoran
+++ b/Documentation/video4linux/Zoran
@@ -255,7 +255,7 @@
 option with X being the card number as given in the previous section.
 To have more than one card, use card=X1[,X2[,X3,[X4[..]]]]
 
-To automate this, add the following to your /etc/modprobe.conf:
+To automate this, add the following to your /etc/modprobe.d/zoran.conf:
 
 options zr36067 card=X1[,X2[,X3[,X4[..]]]]
 alias char-major-81-0 zr36067
diff --git a/Documentation/video4linux/bttv/Modules.conf b/Documentation/video4linux/bttv/Modules.conf
index 753f159..8f258fa 100644
--- a/Documentation/video4linux/bttv/Modules.conf
+++ b/Documentation/video4linux/bttv/Modules.conf
@@ -1,4 +1,4 @@
-# For modern kernels (2.6 or above), this belongs in /etc/modprobe.conf
+# For modern kernels (2.6 or above), this belongs in /etc/modprobe.d/*.conf
 # For for 2.4 kernels or earlier, this belongs in /etc/modules.conf.
 
 # i2c
diff --git a/Documentation/video4linux/meye.txt b/Documentation/video4linux/meye.txt
index 34e2842..a051152 100644
--- a/Documentation/video4linux/meye.txt
+++ b/Documentation/video4linux/meye.txt
@@ -55,7 +55,7 @@
 -----------
 
 In order to automatically load the meye module on use, you can put those lines
-in your /etc/modprobe.conf file:
+in your /etc/modprobe.d/meye.conf file:
 
 	alias char-major-81 videodev
 	alias char-major-81-0 meye
diff --git a/MAINTAINERS b/MAINTAINERS
index eecf344..32671e0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -228,7 +228,7 @@
 L:	linux-acpi@vger.kernel.org
 W:	http://www.lesswatts.org/projects/acpi/
 Q:	http://patchwork.kernel.org/project/linux-acpi/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
 S:	Supported
 F:	drivers/acpi/
 F:	drivers/pnp/pnpacpi/
@@ -1251,7 +1251,6 @@
 M:	Jiri Slaby <jirislaby@gmail.com>
 M:	Nick Kossifidis <mickflemm@gmail.com>
 M:	"Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
-M:	Bob Copeland <me@bobcopeland.com>
 L:	linux-wireless@vger.kernel.org
 L:	ath5k-devel@lists.ath5k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath5k
@@ -1522,8 +1521,8 @@
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S:	Maintained
 F:	drivers/bluetooth/
 
@@ -1533,8 +1532,8 @@
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S:	Maintained
 F:	net/bluetooth/
 F:	include/net/bluetooth/
@@ -2451,17 +2450,17 @@
 
 EDAC-CORE
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
 F:	Documentation/edac.txt
-F:	drivers/edac/edac_*
+F:	drivers/edac/
 F:	include/linux/edac.h
 
 EDAC-AMD64
 M:	Doug Thompson <dougthompson@xmission.com>
 M:	Borislav Petkov <borislav.petkov@amd.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
 F:	drivers/edac/amd64_edac*
@@ -2469,35 +2468,35 @@
 EDAC-E752X
 M:	Mark Gross <mark.gross@intel.com>
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/e752x_edac.c
 
 EDAC-E7XXX
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/e7xxx_edac.c
 
 EDAC-I82443BXGX
 M:	Tim Small <tim@buttersideup.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i82443bxgx_edac.c
 
 EDAC-I3000
 M:	Jason Uhlenkott <juhlenko@akamai.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i3000_edac.c
 
 EDAC-I5000
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i5000_edac.c
@@ -2526,21 +2525,21 @@
 EDAC-I82975X
 M:	Ranganathan Desikan <ravi@jetztechnologies.com>
 M:	"Arvind R." <arvino55@gmail.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i82975x_edac.c
 
 EDAC-PASEMI
 M:	Egor Martovetsky <egor@pasemi.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/pasemi_edac.c
 
 EDAC-R82600
 M:	Tim Small <tim@buttersideup.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/r82600_edac.c
@@ -3557,17 +3556,13 @@
 S:	Supported
 F:	arch/x86/platform/mrst/pmu.*
 
-INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
+INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
+M:	Stanislav Yakovlev <stas.yakovlev@gmail.com>
 L:	linux-wireless@vger.kernel.org
-S:	Orphan
+S:	Maintained
 F:	Documentation/networking/README.ipw2100
-F:	drivers/net/wireless/ipw2x00/ipw2100.*
-
-INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT
-L:	linux-wireless@vger.kernel.org
-S:	Orphan
 F:	Documentation/networking/README.ipw2200
-F:	drivers/net/wireless/ipw2x00/ipw2200.*
+F:	drivers/net/wireless/ipw2x00/
 
 INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
 M:	Joseph Cihula <joseph.cihula@intel.com>
@@ -4314,6 +4309,13 @@
 L:	linux-man@vger.kernel.org
 S:	Maintained
 
+MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
+M:	Mirko Lindner <mlindner@marvell.com>
+M:	Stephen Hemminger <shemminger@vyatta.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/marvell/sk*
+
 MARVELL LIBERTAS WIRELESS DRIVER
 M:	Dan Williams <dcbw@redhat.com>
 L:	libertas-dev@lists.infradead.org
@@ -4344,12 +4346,6 @@
 S:	Odd Fixes
 F:	drivers/mmc/host/mvsdio.*
 
-MARVELL YUKON / SYSKONNECT DRIVER
-M:	Mirko Lindner <mlindner@syskonnect.de>
-M:	Ralph Roesler <rroesler@syskonnect.de>
-W:	http://www.syskonnect.com
-S:	Supported
-
 MATROX FRAMEBUFFER DRIVER
 L:	linux-fbdev@vger.kernel.org
 S:	Orphan
@@ -4537,8 +4533,7 @@
 F:	drivers/net/ethernet/myricom/myri10ge/
 
 NATSEMI ETHERNET DRIVER (DP8381x)
-M:	Tim Hockin <thockin@hockin.org>
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ethernet/natsemi/natsemi.c
 
 NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
@@ -4807,6 +4802,7 @@
 F:	arch/arm/mach-omap2/clockdomain44xx.c
 
 OMAP AUDIO SUPPORT
+M:	Peter Ujfalusi <peter.ujfalusi@ti.com>
 M:	Jarkko Nikula <jarkko.nikula@bitmer.com>
 L:	alsa-devel@alsa-project.org (subscribers-only)
 L:	linux-omap@vger.kernel.org
@@ -5121,6 +5117,11 @@
 F:	include/linux/i2c-algo-pca.h
 F:	include/linux/i2c-pca-platform.h
 
+PCDP - PRIMARY CONSOLE AND DEBUG PORT
+M:	Khalid Aziz <khalid.aziz@hp.com>
+S:	Maintained
+F:	drivers/firmware/pcdp.*
+
 PCI ERROR RECOVERY
 M:     Linas Vepstas <linasvepstas@gmail.com>
 L:	linux-pci@vger.kernel.org
@@ -5642,7 +5643,7 @@
 S:	Maintained
 F:	drivers/remoteproc/
 F:	Documentation/remoteproc.txt
-F:	include/linux/remoteproc.txt
+F:	include/linux/remoteproc.h
 
 RFKILL
 M:	Johannes Berg <johannes@sipsolutions.net>
@@ -6121,12 +6122,6 @@
 S:	Maintained
 F:	drivers/usb/misc/sisusbvga/
 
-SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
-M:	Stephen Hemminger <shemminger@vyatta.com>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/marvell/sk*
-
 SLAB ALLOCATOR
 M:	Christoph Lameter <cl@linux-foundation.org>
 M:	Pekka Enberg <penberg@kernel.org>
@@ -6292,6 +6287,15 @@
 F:	drivers/tty/serial/sunzilog.c
 F:	drivers/tty/serial/sunzilog.h
 
+SPARSE CHECKER
+M:	"Christopher Li" <sparse@chrisli.org>
+L:	linux-sparse@vger.kernel.org
+W:	https://sparse.wiki.kernel.org/
+T:	git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+T:	git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
+S:	Maintained
+F:	include/linux/compiler.h
+
 SPEAR PLATFORM SUPPORT
 M:	Viresh Kumar <viresh.kumar@st.com>
 L:	spear-devel@list.st.com
@@ -7462,8 +7466,7 @@
 
 WOLFSON MICROELECTRONICS DRIVERS
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
-M:	Ian Lartey <ian@opensource.wolfsonmicro.com>
-M:	Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+L:	patches@opensource.wolfsonmicro.com
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
 W:	http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
diff --git a/Makefile b/Makefile
index 1932984..0df3d00 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 3
+PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc2
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -1170,7 +1170,7 @@
 #
 clean: rm-dirs  := $(CLEAN_DIRS)
 clean: rm-files := $(CLEAN_FILES)
-clean-dirs      := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation)
+clean-dirs      := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
 
 PHONY += $(clean-dirs) clean archclean
 $(clean-dirs):
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 4567aca..dfa32f0 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -12,16 +12,22 @@
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
-	return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp);
+	return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
-	get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle);
+	get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
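
The conversion above keeps the familiar driver-facing calls: dma_alloc_coherent()
and dma_free_coherent() become macros that forward to dma_alloc_attrs() and
dma_free_attrs() with a NULL struct dma_attrs, which in turn invoke the renamed
->alloc/->free ops. A minimal, hypothetical caller (buffer size and helper names
are illustrative) is therefore unaffected by the change:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver helper: allocate a 4 KiB coherent DMA buffer. */
static void *example_alloc_buf(struct device *dev, dma_addr_t *dma)
{
	/*
	 * Expands to dma_alloc_attrs(dev, 4096, dma, GFP_KERNEL, NULL),
	 * which on alpha ends up in get_dma_ops(dev)->alloc(..., NULL).
	 */
	return dma_alloc_coherent(dev, 4096, dma, GFP_KERNEL);
}

/* Hypothetical driver helper: free the buffer allocated above. */
static void example_free_buf(struct device *dev, void *cpu, dma_addr_t dma)
{
	/* Expands to dma_free_attrs(dev, 4096, cpu, dma, NULL). */
	dma_free_coherent(dev, 4096, cpu, dma);
}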
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 04eea48..df24b76 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -108,7 +108,8 @@
 }
 
 static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+				       dma_addr_t *dma_handle, gfp_t gfp,
+				       struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -123,7 +124,8 @@
 }
 
 static void alpha_noop_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_addr)
+				     void *cpu_addr, dma_addr_t dma_addr,
+				     struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
@@ -174,8 +176,8 @@
 }
 
 struct dma_map_ops alpha_noop_ops = {
-	.alloc_coherent		= alpha_noop_alloc_coherent,
-	.free_coherent		= alpha_noop_free_coherent,
+	.alloc			= alpha_noop_alloc_coherent,
+	.free			= alpha_noop_free_coherent,
 	.map_page		= alpha_noop_map_page,
 	.map_sg			= alpha_noop_map_sg,
 	.mapping_error		= alpha_noop_mapping_error,
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 4361080..cd63479 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -434,7 +434,8 @@
    else DMA_ADDRP is undefined.  */
 
 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_addrp, gfp_t gfp)
+				      dma_addr_t *dma_addrp, gfp_t gfp,
+				      struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	void *cpu_addr;
@@ -478,7 +479,8 @@
    DMA_ADDR past this call are illegal.  */
 
 static void alpha_pci_free_coherent(struct device *dev, size_t size,
-				    void *cpu_addr, dma_addr_t dma_addr)
+				    void *cpu_addr, dma_addr_t dma_addr,
+				    struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
@@ -952,8 +954,8 @@
 }
 
 struct dma_map_ops alpha_pci_ops = {
-	.alloc_coherent		= alpha_pci_alloc_coherent,
-	.free_coherent		= alpha_pci_free_coherent,
+	.alloc			= alpha_pci_alloc_coherent,
+	.free			= alpha_pci_free_coherent,
 	.map_page		= alpha_pci_map_page,
 	.unmap_page		= alpha_pci_unmap_page,
 	.map_sg			= alpha_pci_map_sg,
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 6f7feb5..35f2ef4 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -120,12 +120,13 @@
  */
 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
 {
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
+	sigset_t blocked;
+
 	current->saved_sigmask = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+
+	mask &= _BLOCKABLE;
+	siginitset(&blocked, mask);
+	set_current_blocked(&blocked);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -238,10 +239,7 @@
 		goto give_sigsegv;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(sc, regs, sw))
 		goto give_sigsegv;
@@ -276,10 +274,7 @@
 		goto give_sigsegv;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
 		goto give_sigsegv;
@@ -501,14 +496,8 @@
 	else
 		ret = setup_frame(sig, ka, oldset, regs, sw);
 
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER)) 
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	if (ret == 0)
+		block_sigmask(ka, sig);
 
 	return ret;
 }
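
The signal.c changes above replace the open-coded siglock/recalc_sigpending()
sequence with set_current_blocked(), which performs the locking and pending-signal
recalculation internally, and replace the per-handler masking with block_sigmask(),
which applies the handler's sa_mask (plus the delivered signal unless SA_NODEFER)
in one call. A hedged sketch of the pattern, with an illustrative helper name:

#include <linux/sched.h>
#include <linux/signal.h>

/* Hypothetical helper: block the signals in 'mask' for the current task. */
static void example_block_signals(sigset_t *mask)
{
	/* SIGKILL and SIGSTOP must never be blocked. */
	sigdelsetmask(mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

	/*
	 * Equivalent to the removed open-coded sequence:
	 *   spin_lock_irq(&current->sighand->siglock);
	 *   current->blocked = *mask;
	 *   recalc_sigpending();
	 *   spin_unlock_irq(&current->sighand->siglock);
	 */
	set_current_blocked(mask);
}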
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 4087a56..50d438d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -450,7 +450,7 @@
 		smp_num_probed = 1;
 	}
 
-	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
+	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
 	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
 }
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9318084..cf006d4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -338,6 +338,7 @@
 	select HAVE_CLK
 	select CLKDEV_LOOKUP
 	select IRQ_DOMAIN
+	select NEED_MACH_IO_H if PCCARD
 	help
 	  This enables support for systems based on the Atmel AT91RM9200,
 	  AT91SAM9 processors.
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index fc871e7..c877087 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -11,8 +11,6 @@
 # Copyright (C) 1995-2002 Russell King
 #
 
-MKIMAGE         := $(srctree)/scripts/mkuboot.sh
-
 ifneq ($(MACHINE),)
 include $(srctree)/$(MACHINE)/Makefile.boot
 endif
@@ -69,22 +67,19 @@
 
 clean-files := *.dtb
 
-quiet_cmd_uimage = UIMAGE  $@
-      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \
-		   -C none -a $(LOADADDR) -e $(STARTADDR) \
-		   -n 'Linux-$(KERNELRELEASE)' -d $< $@
-
-ifeq ($(CONFIG_ZBOOT_ROM),y)
-$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
+ifneq ($(LOADADDR),)
+  UIMAGE_LOADADDR=$(LOADADDR)
 else
-$(obj)/uImage: LOADADDR=$(ZRELADDR)
+  ifeq ($(CONFIG_ZBOOT_ROM),y)
+    UIMAGE_LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
+  else
+    UIMAGE_LOADADDR=$(ZRELADDR)
+  endif
 endif
 
-$(obj)/uImage: STARTADDR=$(LOADADDR)
-
 check_for_multiple_loadaddr = \
-if [ $(words $(LOADADDR)) -gt 1 ]; then \
-	echo 'multiple load addresses: $(LOADADDR)'; \
+if [ $(words $(UIMAGE_LOADADDR)) -gt 1 ]; then \
+	echo 'multiple load addresses: $(UIMAGE_LOADADDR)'; \
 	echo 'This is incompatible with uImages'; \
 	echo 'Specify LOADADDR on the commandline to build an uImage'; \
 	false; \
diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi
index 92f3662..799ad18 100644
--- a/arch/arm/boot/dts/at91sam9g20.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20.dtsi
@@ -35,7 +35,7 @@
 		};
 	};
 
-	memory@20000000 {
+	memory {
 		reg = <0x20000000 0x08000000>;
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index ac0dc00..7829a4d 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -37,8 +37,8 @@
 		usb0: ohci@00600000 {
 			status = "okay";
 			num-ports = <2>;
-			atmel,vbus-gpio = <&pioD 19 0
-					   &pioD 20 0
+			atmel,vbus-gpio = <&pioD 19 1
+					   &pioD 20 1
 					  >;
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 3d0c32f..9e6eb6e 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -36,7 +36,7 @@
 		};
 	};
 
-	memory@70000000 {
+	memory {
 		reg = <0x70000000 0x10000000>;
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index c4c8ae4..a3633bd 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -17,7 +17,7 @@
 		bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
 	};
 
-	memory@70000000 {
+	memory {
 		reg = <0x70000000 0x4000000>;
 	};
 
@@ -73,8 +73,8 @@
 		usb0: ohci@00700000 {
 			status = "okay";
 			num-ports = <2>;
-			atmel,vbus-gpio = <&pioD 1 0
-					   &pioD 3 0>;
+			atmel,vbus-gpio = <&pioD 1 1
+					   &pioD 3 1>;
 		};
 
 		usb1: ehci@00800000 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index c111001..70ab3a4 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -34,7 +34,7 @@
 		};
 	};
 
-	memory@20000000 {
+	memory {
 		reg = <0x20000000 0x10000000>;
 	};
 
@@ -201,8 +201,8 @@
 			      >;
 			atmel,nand-addr-offset = <21>;
 			atmel,nand-cmd-offset = <22>;
-			gpios = <&pioC 8 0
-				 &pioC 14 0
+			gpios = <&pioD 5 0
+				 &pioD 4 0
 				 0
 				>;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
index 67936f8..31e7be2 100644
--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
@@ -8,7 +8,7 @@
  */
 
 / {
-	memory@20000000 {
+	memory {
 		reg = <0x20000000 0x8000000>;
 	};
 
diff --git a/arch/arm/boot/dts/usb_a9g20.dts b/arch/arm/boot/dts/usb_a9g20.dts
index 3b3c4e0..7c2399c 100644
--- a/arch/arm/boot/dts/usb_a9g20.dts
+++ b/arch/arm/boot/dts/usb_a9g20.dts
@@ -16,7 +16,7 @@
 		bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock5 rw rootfstype=ubifs";
 	};
 
-	memory@20000000 {
+	memory {
 		reg = <0x20000000 0x4000000>;
 	};
 
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 44f4a09..0511238 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,6 +2,7 @@
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
+#include <asm/outercache.h>
 
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
@@ -39,7 +40,6 @@
 #ifdef CONFIG_ARCH_HAS_BARRIERS
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#include <asm/outercache.h>
 #define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dsb()
 #define wmb()		mb()
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
new file mode 100644
index 0000000..2fca60a
--- /dev/null
+++ b/arch/arm/include/asm/cpuidle.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_CPUIDLE_H
+#define __ASM_ARM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index);
+#else
+static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#endif
+
+/* Common ARM WFI state */
+#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\
+	.enter                  = arm_cpuidle_simple_enter,\
+	.exit_latency           = 1,\
+	.target_residency       = 1,\
+	.power_usage		= p,\
+	.flags                  = CPUIDLE_FLAG_TIME_VALID,\
+	.name                   = "WFI",\
+	.desc                   = "ARM WFI",\
+}
+
+/*
+ * in case power_specified == 1, give a default WFI power value needed
+ * by some governors
+ */
+#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
+
+#endif
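
Platform cpuidle drivers consume this header by dropping ARM_CPUIDLE_WFI_STATE
into their state table, as the at91 and davinci conversions later in this series
do. A minimal, hypothetical driver sketch (names are illustrative, and the matching
per-CPU cpuidle_device registration is omitted):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <asm/cpuidle.h>

/* Hypothetical WFI-only driver; the core handles timekeeping and IRQ enabling. */
static struct cpuidle_driver example_idle_driver = {
	.name			= "example_idle",
	.owner			= THIS_MODULE,
	.en_core_tk_irqen	= 1,
	.states[0]		= ARM_CPUIDLE_WFI_STATE,
	.state_count		= 1,
};

static int __init example_idle_init(void)
{
	/* A real driver would also register a cpuidle_device per CPU. */
	return cpuidle_register_driver(&example_idle_driver);
}
device_initcall(example_idle_init);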
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index df0ac0b..9af5563 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -119,7 +119,7 @@
 #ifdef CONFIG_NEED_MACH_IO_H
 #include <mach/io.h>
 #else
-#define __io(a)		({ (void)(a); __typesafe_io(0); })
+#define __io(a)		__typesafe_io((a) & IO_SPACE_LIMIT)
 #endif
 
 /*
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8269d89..7b787d6 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -23,7 +23,7 @@
 
 obj-$(CONFIG_LEDS)		+= leds.o
 obj-$(CONFIG_OC_ETM)		+= etm.o
-
+obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
 obj-$(CONFIG_MODULES)		+= armksyms.o module.o
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 632df9a..ede5f77 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -299,7 +299,6 @@
  */
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	struct pci_sys_data *root = bus->sysdata;
 	struct pci_dev *dev;
 	u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;
 
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
new file mode 100644
index 0000000..89545f6
--- /dev/null
+++ b/arch/arm/kernel/cpuidle.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/cpuidle.h>
+#include <asm/proc-fns.h>
+
+int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
+{
+	cpu_do_idle();
+
+	return index;
+}
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
index ab312e5..b760340 100644
--- a/arch/arm/kernel/insn.c
+++ b/arch/arm/kernel/insn.c
@@ -1,3 +1,4 @@
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <asm/opcodes.h>
 
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index ab1869d..4dd41fc 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -152,7 +152,7 @@
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
+	stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 45956c9..80abafb 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -256,7 +256,7 @@
 {
 	unsigned long tmp;
 
-	if (off & 3 || off >= sizeof(struct user))
+	if (off & 3)
 		return -EIO;
 
 	tmp = 0;
@@ -268,6 +268,8 @@
 		tmp = tsk->mm->end_code;
 	else if (off < sizeof(struct pt_regs))
 		tmp = get_user_reg(tsk, off >> 2);
+	else if (off >= sizeof(struct user))
+		return -EIO;
 
 	return put_user(tmp, ret);
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2cee7d1..addbbe8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -349,7 +349,7 @@
 		 * re-initialize the map in platform_smp_prepare_cpus() if
 		 * present != possible (e.g. physical hotplug).
 		 */
-		init_cpu_present(&cpu_possible_map);
+		init_cpu_present(cpu_possible_mask);
 
 		/*
 		 * Initialise the SCU if there are more than one CPU
@@ -581,8 +581,9 @@
 	unsigned long timeout;
 
 	if (num_online_cpus() > 1) {
-		cpumask_t mask = cpu_online_map;
-		cpu_clear(smp_processor_id(), mask);
+		struct cpumask mask;
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
 
 		smp_cross_call(&mask, IPI_CPU_STOP);
 	}
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 7e5651e..5652dde 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -598,6 +598,9 @@
 		else
 			cs_pin = spi1_standard_cs[devices[i].chip_select];
 
+		if (!gpio_is_valid(cs_pin))
+			continue;
+
 		if (devices[i].bus_num == 0)
 			enable_spi0 = 1;
 		else
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 096da87..4db961a 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -415,6 +415,9 @@
 		else
 			cs_pin = spi1_standard_cs[devices[i].chip_select];
 
+		if (!gpio_is_valid(cs_pin))
+			continue;
+
 		if (devices[i].bus_num == 0)
 			enable_spi0 = 1;
 		else
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 53688c46..fe99206 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -72,7 +72,8 @@
 	/* Enable VBus control for UHP ports */
 	for (i = 0; i < data->ports; i++) {
 		if (gpio_is_valid(data->vbus_pin[i]))
-			at91_set_gpio_output(data->vbus_pin[i], 0);
+			at91_set_gpio_output(data->vbus_pin[i],
+					     data->vbus_pin_active_low[i]);
 	}
 
 	/* Enable overcurrent notification */
@@ -671,6 +672,9 @@
 		else
 			cs_pin = spi1_standard_cs[devices[i].chip_select];
 
+		if (!gpio_is_valid(cs_pin))
+			continue;
+
 		if (devices[i].bus_num == 0)
 			enable_spi0 = 1;
 		else
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 698479f..6b008ae 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -127,12 +127,13 @@
 	/* Enable VBus control for UHP ports */
 	for (i = 0; i < data->ports; i++) {
 		if (gpio_is_valid(data->vbus_pin[i]))
-			at91_set_gpio_output(data->vbus_pin[i], 0);
+			at91_set_gpio_output(data->vbus_pin[i],
+					     data->vbus_pin_active_low[i]);
 	}
 
 	/* Enable overcurrent notification */
 	for (i = 0; i < data->ports; i++) {
-		if (data->overcurrent_pin[i])
+		if (gpio_is_valid(data->overcurrent_pin[i]))
 			at91_set_gpio_input(data->overcurrent_pin[i], 1);
 	}
 
@@ -188,7 +189,8 @@
 	/* Enable VBus control for UHP ports */
 	for (i = 0; i < data->ports; i++) {
 		if (gpio_is_valid(data->vbus_pin[i]))
-			at91_set_gpio_output(data->vbus_pin[i], 0);
+			at91_set_gpio_output(data->vbus_pin[i],
+					     data->vbus_pin_active_low[i]);
 	}
 
 	usbh_ehci_data = *data;
@@ -785,6 +787,9 @@
 		else
 			cs_pin = spi1_standard_cs[devices[i].chip_select];
 
+		if (!gpio_is_valid(cs_pin))
+			continue;
+
 		if (devices[i].bus_num == 0)
 			enable_spi0 = 1;
 		else
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index eda72e8..fe4ae22 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -419,6 +419,9 @@
 		else
 			cs_pin = spi_standard_cs[devices[i].chip_select];
 
+		if (!gpio_is_valid(cs_pin))
+			continue;
+
 		/* enable chip-select pin */
 		at91_set_gpio_output(cs_pin, 1);
 
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index b6831ee..13c8cae 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -223,6 +223,8 @@
 	CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
+	CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma0_clk),
+	CLKDEV_CON_DEV_ID("dma_clk", "ffffee00.dma-controller", &dma1_clk),
 	CLKDEV_CON_ID("pioA", &pioAB_clk),
 	CLKDEV_CON_ID("pioB", &pioAB_clk),
 	CLKDEV_CON_ID("pioC", &pioCD_clk),
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c
index 5400a1d..d62fe09 100644
--- a/arch/arm/mach-at91/at91x40.c
+++ b/arch/arm/mach-at91/at91x40.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <asm/proc-fns.h>
+#include <asm/system_misc.h>
 #include <asm/mach/arch.h>
 #include <mach/at91x40.h>
 #include <mach/at91_st.h>
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index 66f0ddf..2ffe50f 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -74,6 +74,7 @@
 static struct at91_usbh_data __initdata ek_usbh_data = {
 	.ports		= 2,
 	.vbus_pin	= { AT91_PIN_PA24, AT91_PIN_PA21 },
+	.vbus_pin_active_low = {1, 1},
 	.overcurrent_pin= {-EINVAL, -EINVAL},
 };
 
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index e1bea73..c88e908 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -71,6 +71,7 @@
 static struct at91_usbh_data __initdata ek_usbh_hs_data = {
 	.ports		= 2,
 	.vbus_pin	= {AT91_PIN_PD1, AT91_PIN_PD3},
+	.vbus_pin_active_low = {1, 1},
 	.overcurrent_pin= {-EINVAL, -EINVAL},
 };
 
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index 555d956..ece1f9a 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -17,9 +17,10 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/cpuidle.h>
-#include <asm/proc-fns.h>
 #include <linux/io.h>
 #include <linux/export.h>
+#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
 
 #include "pm.h"
 
@@ -27,61 +28,39 @@
 
 static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
 
-static struct cpuidle_driver at91_idle_driver = {
-	.name =         "at91_idle",
-	.owner =        THIS_MODULE,
-};
-
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			       int index)
 {
-	struct timeval before, after;
-	int idle_time;
+	at91_standby();
 
-	local_irq_disable();
-	do_gettimeofday(&before);
-	if (index == 0)
-		/* Wait for interrupt state */
-		cpu_do_idle();
-	else if (index == 1)
-		at91_standby();
-
-	do_gettimeofday(&after);
-	local_irq_enable();
-	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
-			(after.tv_usec - before.tv_usec);
-
-	dev->last_residency = idle_time;
 	return index;
 }
 
+static struct cpuidle_driver at91_idle_driver = {
+	.name			= "at91_idle",
+	.owner			= THIS_MODULE,
+	.en_core_tk_irqen	= 1,
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[1]		= {
+		.enter			= at91_enter_idle,
+		.exit_latency		= 10,
+		.target_residency	= 100000,
+		.flags			= CPUIDLE_FLAG_TIME_VALID,
+		.name			= "RAM_SR",
+		.desc			= "WFI and DDR Self Refresh",
+	},
+	.state_count = AT91_MAX_STATES,
+};
+
 /* Initialize CPU idle by registering the idle states */
 static int at91_init_cpuidle(void)
 {
 	struct cpuidle_device *device;
-	struct cpuidle_driver *driver = &at91_idle_driver;
 
 	device = &per_cpu(at91_cpuidle_device, smp_processor_id());
 	device->state_count = AT91_MAX_STATES;
-	driver->state_count = AT91_MAX_STATES;
-
-	/* Wait for interrupt state */
-	driver->states[0].enter = at91_enter_idle;
-	driver->states[0].exit_latency = 1;
-	driver->states[0].target_residency = 10000;
-	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(driver->states[0].name, "WFI");
-	strcpy(driver->states[0].desc, "Wait for interrupt");
-
-	/* Wait for interrupt and RAM self refresh state */
-	driver->states[1].enter = at91_enter_idle;
-	driver->states[1].exit_latency = 10;
-	driver->states[1].target_residency = 10000;
-	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(driver->states[1].name, "RAM_SR");
-	strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
 
 	cpuidle_register_driver(&at91_idle_driver);
 
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 544a5d5..49a8211 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -86,14 +86,15 @@
 extern void __init at91_add_device_eth(struct macb_platform_data *data);
 
  /* USB Host */
+#define AT91_MAX_USBH_PORTS	3
 struct at91_usbh_data {
-	u8		ports;		/* number of ports on root hub */
-	int		vbus_pin[2];	/* port power-control pin */
-	u8              vbus_pin_active_low[2];
+	int		vbus_pin[AT91_MAX_USBH_PORTS];	/* port power-control pin */
+	int             overcurrent_pin[AT91_MAX_USBH_PORTS];
+	u8		ports;				/* number of ports on root hub */
 	u8              overcurrent_supported;
-	int             overcurrent_pin[2];
-	u8              overcurrent_status[2];
-	u8              overcurrent_changed[2];
+	u8              vbus_pin_active_low[AT91_MAX_USBH_PORTS];
+	u8              overcurrent_status[AT91_MAX_USBH_PORTS];
+	u8              overcurrent_changed[AT91_MAX_USBH_PORTS];
 };
 extern void __init at91_add_device_usbh(struct at91_usbh_data *data);
 extern void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data);
diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-at91/include/mach/io.h
new file mode 100644
index 0000000..2d9ca04
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/io.h
@@ -0,0 +1,27 @@
+/*
+ * arch/arm/mach-at91/include/mach/io.h
+ *
+ *  Copyright (C) 2003 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT		0xFFFFFFFF
+#define __io(a)			__typesafe_io(a)
+
+#endif
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index 1083739..97cc04d 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -11,6 +11,7 @@
 #include <linux/pm.h>
 #include <linux/of_address.h>
 
+#include <asm/system_misc.h>
 #include <asm/mach/map.h>
 
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-clps711x/edb7211-mm.c b/arch/arm/mach-clps711x/edb7211-mm.c
index 0bea145..4372f06 100644
--- a/arch/arm/mach-clps711x/edb7211-mm.c
+++ b/arch/arm/mach-clps711x/edb7211-mm.c
@@ -21,6 +21,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/bug.h>
 
 #include <mach/hardware.h>
 #include <asm/page.h>
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index a30c7c5..9107691 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/export.h>
 #include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
 
 #include <mach/cpuidle.h>
 #include <mach/ddr2.h>
@@ -30,12 +31,43 @@
 	u32 flags;
 };
 
+/* Actual code that puts the SoC in different idle states */
+static int davinci_enter_idle(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv,
+						int index)
+{
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
+
+	if (ops && ops->enter)
+		ops->enter(ops->flags);
+
+	index = cpuidle_wrap_enter(dev,	drv, index,
+				arm_cpuidle_simple_enter);
+
+	if (ops && ops->exit)
+		ops->exit(ops->flags);
+
+	return index;
+}
+
 /* fields in davinci_ops.flags */
 #define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN	BIT(0)
 
 static struct cpuidle_driver davinci_idle_driver = {
-	.name	= "cpuidle-davinci",
-	.owner	= THIS_MODULE,
+	.name			= "cpuidle-davinci",
+	.owner			= THIS_MODULE,
+	.en_core_tk_irqen	= 1,
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[1]		= {
+		.enter			= davinci_enter_idle,
+		.exit_latency		= 10,
+		.target_residency	= 100000,
+		.flags			= CPUIDLE_FLAG_TIME_VALID,
+		.name			= "DDR SR",
+		.desc			= "WFI and DDR Self Refresh",
+	},
+	.state_count = DAVINCI_CPUIDLE_MAX_STATES,
 };
 
 static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
@@ -77,41 +109,10 @@
 	},
 };
 
-/* Actual code that puts the SoC in different idle states */
-static int davinci_enter_idle(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv,
-						int index)
-{
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
-	struct timeval before, after;
-	int idle_time;
-
-	local_irq_disable();
-	do_gettimeofday(&before);
-
-	if (ops && ops->enter)
-		ops->enter(ops->flags);
-	/* Wait for interrupt state */
-	cpu_do_idle();
-	if (ops && ops->exit)
-		ops->exit(ops->flags);
-
-	do_gettimeofday(&after);
-	local_irq_enable();
-	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
-			(after.tv_usec - before.tv_usec);
-
-	dev->last_residency = idle_time;
-
-	return index;
-}
-
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
 	int ret;
 	struct cpuidle_device *device;
-	struct cpuidle_driver *driver = &davinci_idle_driver;
 	struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
 
 	device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -123,27 +124,11 @@
 
 	ddr2_reg_base = pdata->ddr2_ctlr_base;
 
-	/* Wait for interrupt state */
-	driver->states[0].enter = davinci_enter_idle;
-	driver->states[0].exit_latency = 1;
-	driver->states[0].target_residency = 10000;
-	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(driver->states[0].name, "WFI");
-	strcpy(driver->states[0].desc, "Wait for interrupt");
-
-	/* Wait for interrupt and DDR self refresh state */
-	driver->states[1].enter = davinci_enter_idle;
-	driver->states[1].exit_latency = 10;
-	driver->states[1].target_residency = 10000;
-	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(driver->states[1].name, "DDR SR");
-	strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
 	if (pdata->ddr2_pdown)
 		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
 	cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
 
 	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
-	driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
 
 	ret = cpuidle_register_driver(&davinci_idle_driver);
 	if (ret) {
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index e6cc50e..8614aab 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -583,10 +583,11 @@
 #ifdef CONFIG_CACHE_L2X0
 static int __init exynos4_l2x0_cache_init(void)
 {
+	int ret;
+
 	if (soc_is_exynos5250())
 		return 0;
 
-	int ret;
 	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
 	if (!ret) {
 		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c
index 3983abe..69aaa45 100644
--- a/arch/arm/mach-exynos/dma.c
+++ b/arch/arm/mach-exynos/dma.c
@@ -35,8 +35,6 @@
 #include <mach/irqs.h>
 #include <mach/dma.h>
 
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
 static u8 exynos4210_pdma0_peri[] = {
 	DMACH_PCM0_RX,
 	DMACH_PCM0_TX,
diff --git a/arch/arm/mach-exynos/include/mach/debug-macro.S b/arch/arm/mach-exynos/include/mach/debug-macro.S
index 6c857ff..e0c86ea 100644
--- a/arch/arm/mach-exynos/include/mach/debug-macro.S
+++ b/arch/arm/mach-exynos/include/mach/debug-macro.S
@@ -21,10 +21,9 @@
 	 */
 
 	.macro addruart, rp, rv, tmp
-		mov	\rp, #0x10000000
-		ldr	\rp, [\rp, #0x0]
-		and	\rp, \rp, #0xf00000
-		teq	\rp, #0x500000		@@ EXYNOS5
+		mrc	p15, 0, \tmp, c0, c0, 0
+		and	\tmp, \tmp, #0xf0
+		teq	\tmp, #0xf0		@@ A15
 		ldreq	\rp, =EXYNOS5_PA_UART
 		movne	\rp, #EXYNOS4_PA_UART	@@ EXYNOS4
 		ldr	\rv, =S3C_VA_UART
diff --git a/arch/arm/mach-exynos/include/mach/uncompress.h b/arch/arm/mach-exynos/include/mach/uncompress.h
index 493f4f3..2979995 100644
--- a/arch/arm/mach-exynos/include/mach/uncompress.h
+++ b/arch/arm/mach-exynos/include/mach/uncompress.h
@@ -20,9 +20,24 @@
 
 #include <plat/uncompress.h>
 
+static unsigned int __raw_readl(unsigned int ptr)
+{
+	return *((volatile unsigned int *)ptr);
+}
+
 static void arch_detect_cpu(void)
 {
-	if (machine_is_smdk5250())
+	u32 chip_id = __raw_readl(EXYNOS_PA_CHIPID);
+
+	/*
+	 * product_id is bits 31:12
+	 *    bits 23:20 describe the exynosX family
+	 */
+	chip_id >>= 20;
+	chip_id &= 0xf;
+
+	if (chip_id == 0x5)
 		uart_base = (volatile u8 *)EXYNOS5_PA_UART + (S3C_UART_OFFSET * CONFIG_S3C_LOWLEVEL_UART_PORT);
 	else
 		uart_base = (volatile u8 *)EXYNOS4_PA_UART + (S3C_UART_OFFSET * CONFIG_S3C_LOWLEVEL_UART_PORT);
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c
index e756d1a..aa1331e 100644
--- a/arch/arm/mach-h720x/common.c
+++ b/arch/arm/mach-h720x/common.c
@@ -24,6 +24,7 @@
 #include <asm/dma.h>
 #include <mach/hardware.h>
 #include <asm/irq.h>
+#include <asm/system_misc.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/map.h>
 #include <mach/irqs.h>
diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c
index b9a95ed..98e04f5 100644
--- a/arch/arm/mach-imx/clock-imx27.c
+++ b/arch/arm/mach-imx/clock-imx27.c
@@ -662,6 +662,7 @@
 	_REGISTER_CLOCK(NULL, "dma", dma_clk)
 	_REGISTER_CLOCK(NULL, "rtic", rtic_clk)
 	_REGISTER_CLOCK(NULL, "brom", brom_clk)
+	_REGISTER_CLOCK(NULL, "emma", emma_clk)
 	_REGISTER_CLOCK("m2m-emmaprp.0", NULL, emma_clk)
 	_REGISTER_CLOCK(NULL, "slcdc", slcdc_clk)
 	_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
index 1e279af..e56c1a8 100644
--- a/arch/arm/mach-imx/clock-imx35.c
+++ b/arch/arm/mach-imx/clock-imx35.c
@@ -483,7 +483,7 @@
 	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
 	_REGISTER_CLOCK(NULL, "max", max_clk)
 	_REGISTER_CLOCK(NULL, "audmux", audmux_clk)
-	_REGISTER_CLOCK(NULL, "csi", csi_clk)
+	_REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
 	_REGISTER_CLOCK(NULL, "iim", iim_clk)
 	_REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk)
 	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index 27bc27e..c650145 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -38,6 +38,8 @@
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 #include <linux/delay.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -479,6 +481,11 @@
 	&armadillo5x0_smc911x_device,
 };
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+	REGULATOR_SUPPLY("vddvario", "smsc911x"),
+};
+
 /*
  * Perform board specific initializations
  */
@@ -489,6 +496,8 @@
 	mxc_iomux_setup_multiple_pins(armadillo5x0_pins,
 			ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0");
 
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	imx_add_gpio_keys(&armadillo5x0_button_data);
 	imx31_add_imx_i2c1(NULL);
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index fc78e80..15a26e9 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -24,6 +24,8 @@
 #include <linux/serial_8250.h>
 #include <linux/smsc911x.h>
 #include <linux/types.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 
 #include <asm/irq.h>
 #include <asm/mach-types.h>
@@ -166,6 +168,11 @@
 			  },
 };
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+	REGULATOR_SUPPLY("vddvario", "smsc911x"),
+};
+
 static int __init kzm_init_smsc9118(void)
 {
 	/*
@@ -175,6 +182,8 @@
 	gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2), "smsc9118-int");
 	gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2));
 
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
 	return platform_device_register(&kzm_smsc9118_device);
 }
 #else
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index 02401bb..83714b0 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -34,6 +34,8 @@
 #include <linux/mfd/mc13783.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -242,6 +244,11 @@
 static int mx31lilly_baseboard;
 core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444);
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+	REGULATOR_SUPPLY("vddvario", "smsc911x"),
+};
+
 static void __init mx31lilly_board_init(void)
 {
 	imx31_soc_init();
@@ -280,6 +287,8 @@
 	imx31_add_spi_imx1(&spi1_pdata);
 	spi_register_board_info(&mc13783_dev, 1);
 
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 
 	/* USB */
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index ef80751..0abef5f 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -29,6 +29,8 @@
 #include <linux/usb/ulpi.h>
 #include <linux/mtd/physmap.h>
 #include <linux/delay.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -226,6 +228,11 @@
 static int mx31lite_baseboard;
 core_param(mx31lite_baseboard, mx31lite_baseboard, int, 0444);
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+	REGULATOR_SUPPLY("vddvario", "smsc911x"),
+};
+
 static void __init mx31lite_init(void)
 {
 	int ret;
@@ -259,6 +266,8 @@
 	if (usbh2_pdata.otg)
 		imx31_add_mxc_ehci_hs(2, &usbh2_pdata);
 
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
 	/* SMSC9117 IRQ pin */
 	ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_SFS6), "sms9117-irq");
 	if (ret)
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index e14291d..6ae51c6 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -97,7 +97,7 @@
 static int lcd_power_gpio = -ENXIO;
 
 static int mc9s08dz60_gpiochip_match(struct gpio_chip *chip,
-						     void *data)
+						     const void *data)
 {
 	return !strcmp(chip->label, data);
 }
diff --git a/arch/arm/mach-imx/mach-mx53_ard.c b/arch/arm/mach-imx/mach-mx53_ard.c
index 753f4fc..0564198 100644
--- a/arch/arm/mach-imx/mach-mx53_ard.c
+++ b/arch/arm/mach-imx/mach-mx53_ard.c
@@ -23,6 +23,8 @@
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/smsc911x.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 
 #include <mach/common.h>
 #include <mach/hardware.h>
@@ -214,6 +216,11 @@
 	return 0;
 }
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+	REGULATOR_SUPPLY("vddvario", "smsc911x"),
+};
+
 void __init imx53_ard_common_init(void)
 {
 	mxc_iomux_v3_setup_multiple_pads(mx53_ard_pads,
@@ -232,6 +239,7 @@
 
 	imx53_ard_common_init();
 	mx53_ard_io_init();
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 
 	imx53_add_sdhci_esdhc_imx(0, &mx53_ard_sd1_data);
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index d534d7f..7412738 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -21,6 +21,7 @@
 #include <linux/err.h>
 
 #include <asm/pgtable.h>
+#include <asm/system_misc.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/map.h>
 
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index 51af9fa..05250ae 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 
+#include <asm/system_misc.h>
 #include <asm/mach/map.h>
 
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index d2c2dc3..d345424 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -36,6 +36,7 @@
 #include <asm/irq.h>
 #include <asm/tlbflush.h>
 #include <asm/pgtable.h>
+#include <asm/system_misc.h>
 
 #include <asm/mach/map.h>
 #include <asm/mach/time.h>
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index c60e7b8..ebbd7fc 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -37,6 +37,7 @@
 #include <asm/page.h>
 #include <asm/irq.h>
 #include <asm/sched_clock.h>
+#include <asm/system_misc.h>
 
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 7088180..0f17109 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
@@ -20,77 +20,47 @@
 #include <linux/io.h>
 #include <linux/export.h>
 #include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
 #include <mach/kirkwood.h>
 
 #define KIRKWOOD_MAX_STATES	2
 
-static struct cpuidle_driver kirkwood_idle_driver = {
-	.name =         "kirkwood_idle",
-	.owner =        THIS_MODULE,
-};
-
-static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
-
 /* Actual code that puts the SoC in different idle states */
 static int kirkwood_enter_idle(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv,
 			       int index)
 {
-	struct timeval before, after;
-	int idle_time;
-
-	local_irq_disable();
-	do_gettimeofday(&before);
-	if (index == 0)
-		/* Wait for interrupt state */
-		cpu_do_idle();
-	else if (index == 1) {
-		/*
-		 * Following write will put DDR in self refresh.
-		 * Note that we have 256 cycles before DDR puts it
-		 * self in self-refresh, so the wait-for-interrupt
-		 * call afterwards won't get the DDR from self refresh
-		 * mode.
-		 */
-		writel(0x7, DDR_OPERATION_BASE);
-		cpu_do_idle();
-	}
-	do_gettimeofday(&after);
-	local_irq_enable();
-	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
-			(after.tv_usec - before.tv_usec);
-
-	/* Update last residency */
-	dev->last_residency = idle_time;
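+	/*
+	 * The following write will put DDR in self refresh.
+	 * Note that we have 256 cycles before DDR puts itself
+	 * in self-refresh, so the wait-for-interrupt call
+	 * afterwards won't get the DDR from self refresh mode.
+	 */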
+	writel(0x7, DDR_OPERATION_BASE);
+	cpu_do_idle();
 
 	return index;
 }
 
+static struct cpuidle_driver kirkwood_idle_driver = {
+	.name			= "kirkwood_idle",
+	.owner			= THIS_MODULE,
+	.en_core_tk_irqen	= 1,
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[1]		= {
+		.enter			= kirkwood_enter_idle,
+		.exit_latency		= 10,
+		.target_residency	= 100000,
+		.flags			= CPUIDLE_FLAG_TIME_VALID,
+		.name			= "DDR SR",
+		.desc			= "WFI and DDR Self Refresh",
+	},
+	.state_count = KIRKWOOD_MAX_STATES,
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
+
 /* Initialize CPU idle by registering the idle states */
 static int kirkwood_init_cpuidle(void)
 {
 	struct cpuidle_device *device;
-	struct cpuidle_driver *driver = &kirkwood_idle_driver;
 
 	device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
 	device->state_count = KIRKWOOD_MAX_STATES;
-	driver->state_count = KIRKWOOD_MAX_STATES;
-
-	/* Wait for interrupt state */
-	driver->states[0].enter = kirkwood_enter_idle;
-	driver->states[0].exit_latency = 1;
-	driver->states[0].target_residency = 10000;
-	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(driver->states[0].name, "WFI");
-	strcpy(driver->states[0].desc, "Wait for interrupt");
-
-	/* Wait for interrupt and DDR self refresh state */
-	driver->states[1].enter = kirkwood_enter_idle;
-	driver->states[1].exit_latency = 10;
-	driver->states[1].target_residency = 10000;
-	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(driver->states[1].name, "DDR SR");
-	strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
 
 	cpuidle_register_driver(&kirkwood_idle_driver);
 	if (cpuidle_register_device(device)) {
diff --git a/arch/arm/mach-msm/include/mach/uncompress.h b/arch/arm/mach-msm/include/mach/uncompress.h
index 169a840..c14011f 100644
--- a/arch/arm/mach-msm/include/mach/uncompress.h
+++ b/arch/arm/mach-msm/include/mach/uncompress.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_ARCH_MSM_UNCOMPRESS_H
 #define __ASM_ARCH_MSM_UNCOMPRESS_H
 
+#include <asm/barrier.h>
 #include <asm/processor.h>
 #include <mach/msm_iomap.h>
 
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index 0c56a5a..c56df9e 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -203,15 +203,9 @@
 	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
 }
 
-static int debug_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static const struct file_operations debug_ops = {
 	.read = debug_read,
-	.open = debug_open,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
diff --git a/arch/arm/mach-omap1/flash.c b/arch/arm/mach-omap1/flash.c
index f9bf78d4..401eb3c 100644
--- a/arch/arm/mach-omap1/flash.c
+++ b/arch/arm/mach-omap1/flash.c
@@ -17,20 +17,12 @@
 
 void omap1_set_vpp(struct platform_device *pdev, int enable)
 {
-	static int count;
 	u32 l;
 
-	if (enable) {
-		if (count++ == 0) {
-			l = omap_readl(EMIFS_CONFIG);
-			l |= OMAP_EMIFS_CONFIG_WP;
-			omap_writel(l, EMIFS_CONFIG);
-		}
-	} else {
-		if (count && (--count == 0)) {
-			l = omap_readl(EMIFS_CONFIG);
-			l &= ~OMAP_EMIFS_CONFIG_WP;
-			omap_writel(l, EMIFS_CONFIG);
-		}
-	}
+	l = omap_readl(EMIFS_CONFIG);
+	if (enable)
+		l |= OMAP_EMIFS_CONFIG_WP;
+	else
+		l &= ~OMAP_EMIFS_CONFIG_WP;
+	omap_writel(l, EMIFS_CONFIG);
 }
diff --git a/arch/arm/mach-omap1/include/mach/io.h b/arch/arm/mach-omap1/include/mach/io.h
new file mode 100644
index 0000000..ce4f800
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/io.h
@@ -0,0 +1,45 @@
+/*
+ * arch/arm/mach-omap1/include/mach/io.h
+ *
+ * IO definitions for TI OMAP processors and boards
+ *
+ * Copied from arch/arm/mach-sa1100/include/mach/io.h
+ * Copyright (C) 1997-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Modifications:
+ *  06-12-1997	RMK	Created.
+ *  07-04-1999	RMK	Major cleanup
+ */
+
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+/*
+ * We don't actually have real ISA or PCI buses, but there are so many
+ * drivers out there that might just work if we fake them...
+ */
+#define __io(a)		__typesafe_io(a)
+
+#endif
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 41b0a2f..909a8b9 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -26,6 +26,7 @@
 
 #include <linux/i2c/at24.h>
 #include <linux/i2c/twl.h>
+#include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
 #include <linux/mmc/host.h>
 
@@ -81,8 +82,23 @@
 	.flags		= SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
 };
 
+static struct regulator_consumer_supply cm_t35_smsc911x_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
+static struct regulator_consumer_supply sb_t35_smsc911x_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.1"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.1"),
+};
+
 static void __init cm_t35_init_ethernet(void)
 {
+	regulator_register_fixed(0, cm_t35_smsc911x_supplies,
+				 ARRAY_SIZE(cm_t35_smsc911x_supplies));
+	regulator_register_fixed(1, sb_t35_smsc911x_supplies,
+				 ARRAY_SIZE(sb_t35_smsc911x_supplies));
+
 	gpmc_smsc911x_init(&cm_t35_smsc911x_cfg);
 	gpmc_smsc911x_init(&sb_t35_smsc911x_cfg);
 }
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index e558800..930c0d3 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -634,8 +634,14 @@
 static inline void __init igep_wlan_bt_init(void) { }
 #endif
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
 static void __init igep_init(void)
 {
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 
 	/* Get IGEP2 hardware revision */
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index d50a562a..1b60495 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -22,6 +22,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/spi/spi.h>
+#include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
 #include <linux/i2c/twl.h>
 #include <linux/io.h>
@@ -410,8 +411,14 @@
 
 };
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
 static void __init omap_ldp_init(void)
 {
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 	ldp_init_smsc911x();
 	omap_i2c_init();
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 4c90f07..49df127 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -114,15 +114,6 @@
 
 static inline void __init omap3evm_init_smsc911x(void)
 {
-	struct clk *l3ck;
-	unsigned int rate;
-
-	l3ck = clk_get(NULL, "l3_ck");
-	if (IS_ERR(l3ck))
-		rate = 100000000;
-	else
-		rate = clk_get_rate(l3ck);
-
 	/* Configure ethernet controller reset gpio */
 	if (cpu_is_omap3430()) {
 		if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
@@ -632,9 +623,15 @@
 #endif
 }
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
 static void __init omap3_evm_init(void)
 {
 	omap3_evm_get_revision();
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 
 	if (cpu_is_omap3630())
 		omap3_mux_init(omap36x_board_mux, OMAP_PACKAGE_CBB);
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 4a7d8c8..9b3c141 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 
+#include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
 
 #include <linux/i2c/twl.h>
@@ -188,8 +189,14 @@
 };
 #endif
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
 static void __init omap3logic_init(void)
 {
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 	omap3torpedo_fix_pbias_voltage();
 	omap3logic_i2c_init();
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 6410043..4dffc95 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -24,6 +24,7 @@
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
 
+#include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
 #include <linux/i2c/twl.h>
 #include <linux/mmc/host.h>
@@ -72,15 +73,6 @@
 
 static inline void __init omap3stalker_init_eth(void)
 {
-	struct clk *l3ck;
-	unsigned int rate;
-
-	l3ck = clk_get(NULL, "l3_ck");
-	if (IS_ERR(l3ck))
-		rate = 100000000;
-	else
-		rate = clk_get_rate(l3ck);
-
 	omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP);
 	gpmc_smsc911x_init(&smsc911x_cfg);
 }
@@ -419,8 +411,14 @@
 };
 #endif
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
 static void __init omap3_stalker_init(void)
 {
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
 	omap_board_config = omap3_stalker_config;
 	omap_board_config_size = ARRAY_SIZE(omap3_stalker_config);
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 668533e..33aa391 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -498,10 +498,18 @@
 	{ OVERO_GPIO_BT_NRESET, GPIOF_OUT_INIT_HIGH,	"lcd bl enable" },
 };
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+	REGULATOR_SUPPLY("vddvario", "smsc911x.1"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.1"),
+};
+
 static void __init overo_init(void)
 {
 	int ret;
 
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 	omap_hsmmc_init(mmc);
 	overo_i2c_init();
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index 1e8540e..f64f441 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -14,6 +14,9 @@
 #include <linux/smsc911x.h>
 #include <linux/interrupt.h>
 
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
+
 #include <plat/gpmc.h>
 #include <plat/gpmc-smsc911x.h>
 
@@ -117,11 +120,17 @@
 	&zoom_debugboard_serial_device,
 };
 
+static struct regulator_consumer_supply dummy_supplies[] = {
+	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
 int __init zoom_debugboard_init(void)
 {
 	if (!omap_zoom_debugboard_detect())
 		return 0;
 
+	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	zoom_init_smsc911x();
 	zoom_init_quaduart();
 	return platform_add_devices(zoom_devices, ARRAY_SIZE(zoom_devices));
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 480fb8f..f4a626f7 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -747,7 +747,7 @@
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-	.clksel_mask	= OMAP3430_CLKSEL_TV_MASK,
+	.clksel_mask	= OMAP3630_CLKSEL_TV_MASK,
 	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
@@ -832,7 +832,7 @@
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-	.clksel_mask	= OMAP3430_CLKSEL_DSS1_MASK,
+	.clksel_mask	= OMAP3630_CLKSEL_DSS1_MASK,
 	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
@@ -859,7 +859,7 @@
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
-	.clksel_mask	= OMAP3430_CLKSEL_CAM_MASK,
+	.clksel_mask	= OMAP3630_CLKSEL_CAM_MASK,
 	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.set_rate	= &omap2_clksel_set_rate,
@@ -886,7 +886,7 @@
 	.parent		= &dpll4_ck,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP3430_DIV_DPLL4_MASK,
+	.clksel_mask	= OMAP3630_DIV_DPLL4_MASK,
 	.clksel		= dpll4_clksel,
 	.clkdm_name	= "dpll4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
@@ -1394,6 +1394,7 @@
 	.name		= "cpefuse_fck",
 	.ops		= &clkops_omap2_dflt,
 	.parent		= &sys_ck,
+	.clkdm_name	= "core_l4_clkdm",
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
 	.enable_bit	= OMAP3430ES2_EN_CPEFUSE_SHIFT,
 	.recalc		= &followparent_recalc,
@@ -1403,6 +1404,7 @@
 	.name		= "ts_fck",
 	.ops		= &clkops_omap2_dflt,
 	.parent		= &omap_32k_fck,
+	.clkdm_name	= "core_l4_clkdm",
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
 	.enable_bit	= OMAP3430ES2_EN_TS_SHIFT,
 	.recalc		= &followparent_recalc,
@@ -1412,6 +1414,7 @@
 	.name		= "usbtll_fck",
 	.ops		= &clkops_omap2_dflt_wait,
 	.parent		= &dpll5_m2_ck,
+	.clkdm_name	= "core_l4_clkdm",
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
 	.enable_bit	= OMAP3430ES2_EN_USBTLL_SHIFT,
 	.recalc		= &followparent_recalc,
@@ -1617,6 +1620,7 @@
 	.name		= "fshostusb_fck",
 	.ops		= &clkops_omap2_dflt_wait,
 	.parent		= &core_48m_fck,
+	.clkdm_name	= "core_l4_clkdm",
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
 	.enable_bit	= OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
 	.recalc		= &followparent_recalc,
@@ -2043,6 +2047,7 @@
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
 	.enable_bit	= OMAP3430_EN_OMAPCTRL_SHIFT,
 	.flags		= ENABLE_ON_INIT,
+	.clkdm_name	= "core_l4_clkdm",
 	.recalc		= &followparent_recalc,
 };
 
@@ -2094,6 +2099,7 @@
 	.clksel_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
 	.clksel_mask	= OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
 	.clksel		= usb_l4_clksel,
+	.clkdm_name	= "core_l4_clkdm",
 	.recalc		= &omap2_clksel_recalc,
 };
 
@@ -3467,8 +3473,8 @@
 	CLK(NULL,	"ipss_ick",	&ipss_ick,	CK_AM35XX),
 	CLK(NULL,	"rmii_ck",	&rmii_ck,	CK_AM35XX),
 	CLK(NULL,	"pclk_ck",	&pclk_ck,	CK_AM35XX),
-	CLK("davinci_emac",	"emac_clk",	&emac_ick,	CK_AM35XX),
-	CLK("davinci_emac",	"phy_clk",	&emac_fck,	CK_AM35XX),
+	CLK("davinci_emac",	NULL,	&emac_ick,	CK_AM35XX),
+	CLK("davinci_mdio.0",	NULL,	&emac_fck,	CK_AM35XX),
 	CLK("vpfe-capture",	"master",	&vpfe_ick,	CK_AM35XX),
 	CLK("vpfe-capture",	"slave",	&vpfe_fck,	CK_AM35XX),
 	CLK("musb-am35x",	"ick",		&hsotgusb_ick_am35xx,	CK_AM35XX),
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index c03c110..fa6ea65 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -957,8 +957,8 @@
 	.modes		= (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
 	.autoidle_reg	= OMAP4430_CM_AUTOIDLE_DPLL_USB,
 	.idlest_reg	= OMAP4430_CM_IDLEST_DPLL_USB,
-	.mult_mask	= OMAP4430_DPLL_MULT_MASK,
-	.div1_mask	= OMAP4430_DPLL_DIV_MASK,
+	.mult_mask	= OMAP4430_DPLL_MULT_USB_MASK,
+	.div1_mask	= OMAP4430_DPLL_DIV_0_7_MASK,
 	.enable_mask	= OMAP4430_DPLL_EN_MASK,
 	.autoidle_mask	= OMAP4430_AUTO_DPLL_MODE_MASK,
 	.idlest_mask	= OMAP4430_ST_DPLL_CLK_MASK,
@@ -978,6 +978,7 @@
 	.recalc		= &omap3_dpll_recalc,
 	.round_rate	= &omap2_dpll_round_rate,
 	.set_rate	= &omap3_noncore_dpll_set_rate,
+	.clkdm_name	= "l3_init_clkdm",
 };
 
 static struct clk dpll_usb_clkdcoldo_ck = {
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index 9299ac2..bd7ed13 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -390,7 +390,7 @@
 	.prcm_partition	  = OMAP4430_PRM_PARTITION,
 	.cm_inst	  = OMAP4430_PRM_EMU_CM_INST,
 	.clkdm_offs	  = OMAP4430_PRM_EMU_CM_EMU_CDOFFS,
-	.flags		  = CLKDM_CAN_HWSUP,
+	.flags		  = CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_FORCE_WAKEUP,
 };
 
 static struct clockdomain l3_dma_44xx_clkdm = {
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 464cffd..5358664 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -87,29 +87,14 @@
 	return 0;
 }
 
-/**
- * omap3_enter_idle - Programs OMAP3 to enter the specified state
- * @dev: cpuidle device
- * @drv: cpuidle driver
- * @index: the index of state to be entered
- *
- * Called from the CPUidle framework to program the device to the
- * specified target state selected by the governor.
- */
-static int omap3_enter_idle(struct cpuidle_device *dev,
+static int __omap3_enter_idle(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv,
 				int index)
 {
 	struct omap3_idle_statedata *cx =
 			cpuidle_get_statedata(&dev->states_usage[index]);
-	struct timespec ts_preidle, ts_postidle, ts_idle;
 	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
-	int idle_time;
 
-	/* Used to keep track of the total time in idle */
-	getnstimeofday(&ts_preidle);
-
-	local_irq_disable();
 	local_fiq_disable();
 
 	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
@@ -148,22 +133,29 @@
 	}
 
 return_sleep_time:
-	getnstimeofday(&ts_postidle);
-	ts_idle = timespec_sub(ts_postidle, ts_preidle);
 
-	local_irq_enable();
 	local_fiq_enable();
 
-	idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
-								USEC_PER_SEC;
-
-	/* Update cpuidle counters */
-	dev->last_residency = idle_time;
-
 	return index;
 }
 
 /**
+ * omap3_enter_idle - Programs OMAP3 to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static inline int omap3_enter_idle(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv,
+				int index)
+{
+	return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
+}
+
+/**
  * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
  * @drv: cpuidle driver
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 72e018b..f386cbe 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -62,15 +62,9 @@
 {
 	struct omap4_idle_statedata *cx =
 			cpuidle_get_statedata(&dev->states_usage[index]);
-	struct timespec ts_preidle, ts_postidle, ts_idle;
 	u32 cpu1_state;
-	int idle_time;
 	int cpu_id = smp_processor_id();
 
-	/* Used to keep track of the total time in idle */
-	getnstimeofday(&ts_preidle);
-
-	local_irq_disable();
 	local_fiq_disable();
 
 	/*
@@ -128,26 +122,17 @@
 	if (index > 0)
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
 
-	getnstimeofday(&ts_postidle);
-	ts_idle = timespec_sub(ts_postidle, ts_preidle);
-
-	local_irq_enable();
 	local_fiq_enable();
 
-	idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
-								USEC_PER_SEC;
-
-	/* Update cpuidle counters */
-	dev->last_residency = idle_time;
-
 	return index;
 }
 
 DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
 
 struct cpuidle_driver omap4_idle_driver = {
-	.name =		"omap4_idle",
-	.owner =	THIS_MODULE,
+	.name				= "omap4_idle",
+	.owner				= THIS_MODULE,
+	.en_core_tk_irqen		= 1,
 };
 
 static inline void _fill_cstate(struct cpuidle_driver *drv,
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c
index 5e5880d..b6c77be 100644
--- a/arch/arm/mach-omap2/gpmc-smsc911x.c
+++ b/arch/arm/mach-omap2/gpmc-smsc911x.c
@@ -19,15 +19,11 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/smsc911x.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
 
 #include <plat/board.h>
 #include <plat/gpmc.h>
 #include <plat/gpmc-smsc911x.h>
 
-static struct omap_smsc911x_platform_data *gpmc_cfg;
-
 static struct resource gpmc_smsc911x_resources[] = {
 	[0] = {
 		.flags		= IORESOURCE_MEM,
@@ -41,51 +37,6 @@
 	.phy_interface	= PHY_INTERFACE_MODE_MII,
 	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
 	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
-	.flags		= SMSC911X_USE_16BIT,
-};
-
-static struct regulator_consumer_supply gpmc_smsc911x_supply[] = {
-	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
-	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-/* Generic regulator definition to satisfy smsc911x */
-static struct regulator_init_data gpmc_smsc911x_reg_init_data = {
-	.constraints = {
-		.min_uV			= 3300000,
-		.max_uV			= 3300000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(gpmc_smsc911x_supply),
-	.consumer_supplies	= gpmc_smsc911x_supply,
-};
-
-static struct fixed_voltage_config gpmc_smsc911x_fixed_reg_data = {
-	.supply_name		= "gpmc_smsc911x",
-	.microvolts		= 3300000,
-	.gpio			= -EINVAL,
-	.startup_delay		= 0,
-	.enable_high		= 0,
-	.enabled_at_boot	= 1,
-	.init_data		= &gpmc_smsc911x_reg_init_data,
-};
-
-/*
- * Platform device id of 42 is a temporary fix to avoid conflicts
- * with other reg-fixed-voltage devices. The real fix should
- * involve the driver core providing a way of dynamically
- * assigning a unique id on registration for platform devices
- * in the same name space.
- */
-static struct platform_device gpmc_smsc911x_regulator = {
-	.name		= "reg-fixed-voltage",
-	.id		= 42,
-	.dev = {
-		.platform_data	= &gpmc_smsc911x_fixed_reg_data,
-	},
 };
 
 /*
@@ -93,23 +44,12 @@
  * assume that pin multiplexing is done in the board-*.c file,
  * or in the bootloader.
  */
-void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data)
+void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *gpmc_cfg)
 {
 	struct platform_device *pdev;
 	unsigned long cs_mem_base;
 	int ret;
 
-	gpmc_cfg = board_data;
-
-	if (!gpmc_cfg->id) {
-		ret = platform_device_register(&gpmc_smsc911x_regulator);
-		if (ret < 0) {
-			pr_err("Unable to register smsc911x regulators: %d\n",
-			       ret);
-			return;
-		}
-	}
-
 	if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) {
 		pr_err("Failed to request GPMC mem region\n");
 		return;
@@ -139,8 +79,7 @@
 		gpio_set_value(gpmc_cfg->gpio_reset, 1);
 	}
 
-	if (gpmc_cfg->flags)
-		gpmc_smsc911x_config.flags = gpmc_cfg->flags;
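+	/* Use the board-specified flags, or fall back to 16-bit access */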
+	gpmc_smsc911x_config.flags = gpmc_cfg->flags ? : SMSC911X_USE_16BIT;
 
 	pdev = platform_device_register_resndata(NULL, "smsc911x", gpmc_cfg->id,
 		 gpmc_smsc911x_resources, ARRAY_SIZE(gpmc_smsc911x_resources),
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 100db62..b0268ea 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -506,6 +506,13 @@
 	if (oh->dev_attr != NULL) {
 		mmc_dev_attr = oh->dev_attr;
 		mmc_data->controller_flags = mmc_dev_attr->flags;
+		/*
+		 * erratum 2.1.1.128 doesn't apply if the board has
+		 * a transceiver attached
+		 */
+		if (hsmmcinfo->transceiver)
+			mmc_data->controller_flags &=
+				~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ;
 	}
 
 	pdev = platform_device_alloc(name, ctrl_nr - 1);
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
index 4fa72c7..1c582a8 100644
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ b/arch/arm/mach-omap2/include/mach/barriers.h
@@ -22,6 +22,8 @@
 #ifndef __MACH_BARRIERS_H
 #define __MACH_BARRIERS_H
 
+#include <asm/outercache.h>
+
 extern void omap_bus_sync(void);
 
 #define rmb()		dsb()
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index eba6cd3..2c27fdb 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1395,7 +1395,7 @@
  */
 static int _ocp_softreset(struct omap_hwmod *oh)
 {
-	u32 v;
+	u32 v, softrst_mask;
 	int c = 0;
 	int ret = 0;
 
@@ -1427,11 +1427,13 @@
 						    oh->class->sysc->syss_offs)
 				   & SYSS_RESETDONE_MASK),
 				  MAX_MODULE_SOFTRESET_WAIT, c);
-	else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS)
+	else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
+		softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
 		omap_test_timeout(!(omap_hwmod_read(oh,
 						     oh->class->sysc->sysc_offs)
-				   & SYSC_TYPE2_SOFTRESET_MASK),
+				   & softrst_mask),
 				  MAX_MODULE_SOFTRESET_WAIT, c);
+	}
 
 	if (c == MAX_MODULE_SOFTRESET_WAIT)
 		pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
@@ -1477,6 +1479,11 @@
 
 	ret = (oh->class->reset) ? oh->class->reset(oh) : _ocp_softreset(oh);
 
+	if (oh->class->sysc) {
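+		/*
+		 * OCP_SYSCONFIG bits need to be reprogrammed after a
+		 * softreset, so refresh the cached value and re-enable
+		 * the sysconfig settings.
+		 */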
+		_update_sysc_cache(oh);
+		_enable_sysc(oh);
+	}
+
 	return ret;
 }
 
@@ -1786,20 +1793,9 @@
 		return 0;
 	}
 
-	if (!(oh->flags & HWMOD_INIT_NO_RESET)) {
+	if (!(oh->flags & HWMOD_INIT_NO_RESET))
 		_reset(oh);
 
-		/*
-		 * OCP_SYSCONFIG bits need to be reprogrammed after a softreset.
-		 * The _enable() function should be split to
-		 * avoid the rewrite of the OCP_SYSCONFIG register.
-		 */
-		if (oh->class->sysc) {
-			_update_sysc_cache(oh);
-			_enable_sysc(oh);
-		}
-	}
-
 	postsetup_state = oh->_postsetup_state;
 	if (postsetup_state == _HWMOD_STATE_UNKNOWN)
 		postsetup_state = _HWMOD_STATE_ENABLED;
@@ -1907,20 +1903,10 @@
  */
 int omap_hwmod_softreset(struct omap_hwmod *oh)
 {
-	u32 v;
-	int ret;
-
-	if (!oh || !(oh->_sysc_cache))
+	if (!oh)
 		return -EINVAL;
 
-	v = oh->_sysc_cache;
-	ret = _set_softreset(oh, &v);
-	if (ret)
-		goto error;
-	_write_sysconfig(v, oh);
-
-error:
-	return ret;
+	return _ocp_softreset(oh);
 }
 
 /**
@@ -2463,26 +2449,28 @@
  * @oh: struct omap_hwmod *
  *
  * Sets the module OCP socket ENAWAKEUP bit to allow the module to
- * send wakeups to the PRCM.  Eventually this should sets PRCM wakeup
- * registers to cause the PRCM to receive wakeup events from the
- * module.  Does not set any wakeup routing registers beyond this
- * point - if the module is to wake up any other module or subsystem,
- * that must be set separately.  Called by omap_device code.  Returns
- * -EINVAL on error or 0 upon success.
+ * send wakeups to the PRCM, and enable I/O ring wakeup events for
+ * this IP block if it has dynamic mux entries.  Eventually this
+ * should set PRCM wakeup registers to cause the PRCM to receive
+ * wakeup events from the module.  Does not set any wakeup routing
+ * registers beyond this point - if the module is to wake up any other
+ * module or subsystem, that must be set separately.  Called by
+ * omap_device code.  Returns -EINVAL on error or 0 upon success.
  */
 int omap_hwmod_enable_wakeup(struct omap_hwmod *oh)
 {
 	unsigned long flags;
 	u32 v;
 
-	if (!oh->class->sysc ||
-	    !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
-		return -EINVAL;
-
 	spin_lock_irqsave(&oh->_lock, flags);
-	v = oh->_sysc_cache;
-	_enable_wakeup(oh, &v);
-	_write_sysconfig(v, oh);
+
+	if (oh->class->sysc &&
+	    (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) {
+		v = oh->_sysc_cache;
+		_enable_wakeup(oh, &v);
+		_write_sysconfig(v, oh);
+	}
+
 	_set_idle_ioring_wakeup(oh, true);
 	spin_unlock_irqrestore(&oh->_lock, flags);
 
@@ -2494,26 +2482,28 @@
  * @oh: struct omap_hwmod *
  *
  * Clears the module OCP socket ENAWAKEUP bit to prevent the module
- * from sending wakeups to the PRCM.  Eventually this should clear
- * PRCM wakeup registers to cause the PRCM to ignore wakeup events
- * from the module.  Does not set any wakeup routing registers beyond
- * this point - if the module is to wake up any other module or
- * subsystem, that must be set separately.  Called by omap_device
- * code.  Returns -EINVAL on error or 0 upon success.
+ * from sending wakeups to the PRCM, and disable I/O ring wakeup
+ * events for this IP block if it has dynamic mux entries.  Eventually
+ * this should clear PRCM wakeup registers to cause the PRCM to ignore
+ * wakeup events from the module.  Does not set any wakeup routing
+ * registers beyond this point - if the module is to wake up any other
+ * module or subsystem, that must be set separately.  Called by
+ * omap_device code.  Returns -EINVAL on error or 0 upon success.
  */
 int omap_hwmod_disable_wakeup(struct omap_hwmod *oh)
 {
 	unsigned long flags;
 	u32 v;
 
-	if (!oh->class->sysc ||
-	    !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
-		return -EINVAL;
-
 	spin_lock_irqsave(&oh->_lock, flags);
-	v = oh->_sysc_cache;
-	_disable_wakeup(oh, &v);
-	_write_sysconfig(v, oh);
+
+	if (oh->class->sysc &&
+	    (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) {
+		v = oh->_sysc_cache;
+		_disable_wakeup(oh, &v);
+		_write_sysconfig(v, oh);
+	}
+
 	_set_idle_ioring_wakeup(oh, false);
 	spin_unlock_irqrestore(&oh->_lock, flags);
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 08daa5e..cc9bd10 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2996,6 +2996,11 @@
 	&omap44xx_l4_abe__mcbsp1_dma,
 };
 
+static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
+	{ .role = "pad_fck", .clk = "pad_clks_ck" },
+	{ .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+};
+
 static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
 	.name		= "mcbsp1",
 	.class		= &omap44xx_mcbsp_hwmod_class,
@@ -3012,6 +3017,8 @@
 	},
 	.slaves		= omap44xx_mcbsp1_slaves,
 	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp1_slaves),
+	.opt_clks	= mcbsp1_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcbsp1_opt_clks),
 };
 
 /* mcbsp2 */
@@ -3071,6 +3078,11 @@
 	&omap44xx_l4_abe__mcbsp2_dma,
 };
 
+static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
+	{ .role = "pad_fck", .clk = "pad_clks_ck" },
+	{ .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+};
+
 static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
 	.name		= "mcbsp2",
 	.class		= &omap44xx_mcbsp_hwmod_class,
@@ -3087,6 +3099,8 @@
 	},
 	.slaves		= omap44xx_mcbsp2_slaves,
 	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp2_slaves),
+	.opt_clks	= mcbsp2_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcbsp2_opt_clks),
 };
 
 /* mcbsp3 */
@@ -3146,6 +3160,11 @@
 	&omap44xx_l4_abe__mcbsp3_dma,
 };
 
+static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
+	{ .role = "pad_fck", .clk = "pad_clks_ck" },
+	{ .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+};
+
 static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
 	.name		= "mcbsp3",
 	.class		= &omap44xx_mcbsp_hwmod_class,
@@ -3162,6 +3181,8 @@
 	},
 	.slaves		= omap44xx_mcbsp3_slaves,
 	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp3_slaves),
+	.opt_clks	= mcbsp3_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcbsp3_opt_clks),
 };
 
 /* mcbsp4 */
@@ -3200,6 +3221,11 @@
 	&omap44xx_l4_per__mcbsp4,
 };
 
+static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
+	{ .role = "pad_fck", .clk = "pad_clks_ck" },
+	{ .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+};
+
 static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
 	.name		= "mcbsp4",
 	.class		= &omap44xx_mcbsp_hwmod_class,
@@ -3216,6 +3242,8 @@
 	},
 	.slaves		= omap44xx_mcbsp4_slaves,
 	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp4_slaves),
+	.opt_clks	= mcbsp4_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcbsp4_opt_clks),
 };
 
 /*
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
index 9262a6b..de6d464 100644
--- a/arch/arm/mach-omap2/opp.c
+++ b/arch/arm/mach-omap2/opp.c
@@ -64,10 +64,10 @@
 		}
 		oh = omap_hwmod_lookup(opp_def->hwmod_name);
 		if (!oh || !oh->od) {
-			pr_warn("%s: no hwmod or odev for %s, [%d] "
+			pr_debug("%s: no hwmod or odev for %s, [%d] "
 				"cannot add OPPs.\n", __func__,
 				opp_def->hwmod_name, i);
-			return -EINVAL;
+			continue;
 		}
 		dev = &oh->od->pdev->dev;
 
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 238defc..703bd10 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -153,8 +153,7 @@
 		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
 		/* Following is for error tracking, it should not happen */
 		if (ret) {
-			printk(KERN_ERR "save_secure_sram() returns %08x\n",
-				ret);
+			pr_err("save_secure_sram() returns %08x\n", ret);
 			while (1)
 				;
 		}
@@ -289,7 +288,7 @@
 		break;
 	default:
 		/* Invalid state */
-		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
+		pr_err("Invalid mpu state in sram_idle\n");
 		return;
 	}
 
@@ -439,18 +438,17 @@
 	list_for_each_entry(pwrst, &pwrst_list, node) {
 		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
 		if (state > pwrst->next_state) {
-			printk(KERN_INFO "Powerdomain (%s) didn't enter "
-			       "target state %d\n",
+			pr_info("Powerdomain (%s) didn't enter "
+				"target state %d\n",
 			       pwrst->pwrdm->name, pwrst->next_state);
 			ret = -1;
 		}
 		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
 	}
 	if (ret)
-		printk(KERN_ERR "Could not enter target state in pm_suspend\n");
+		pr_err("Could not enter target state in pm_suspend\n");
 	else
-		printk(KERN_INFO "Successfully put all powerdomains "
-		       "to target state\n");
+		pr_info("Successfully put all powerdomains to target state\n");
 
 	return ret;
 }
@@ -734,21 +732,22 @@
 
 	if (ret) {
 		pr_err("pm: Failed to request pm_io irq\n");
-		goto err1;
+		goto err2;
 	}
 
 	ret = pwrdm_for_each(pwrdms_setup, NULL);
 	if (ret) {
-		printk(KERN_ERR "Failed to setup powerdomains\n");
-		goto err2;
+		pr_err("Failed to setup powerdomains\n");
+		goto err3;
 	}
 
 	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);
 
 	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
 	if (mpu_pwrdm == NULL) {
-		printk(KERN_ERR "Failed to get mpu_pwrdm\n");
-		goto err2;
+		pr_err("Failed to get mpu_pwrdm\n");
+		ret = -EINVAL;
+		goto err3;
 	}
 
 	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
@@ -781,8 +780,8 @@
 		omap3_secure_ram_storage =
 			kmalloc(0x803F, GFP_KERNEL);
 		if (!omap3_secure_ram_storage)
-			printk(KERN_ERR "Memory allocation failed when"
-					"allocating for secure sram context\n");
+			pr_err("Memory allocation failed when "
+			       "allocating for secure sram context\n");
 
 		local_irq_disable();
 		local_fiq_disable();
@@ -796,14 +795,17 @@
 	}
 
 	omap3_save_scratchpad_contents();
-err1:
 	return ret;
-err2:
-	free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
+
+err3:
 	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
 		list_del(&pwrst->node);
 		kfree(pwrst);
 	}
+	free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
+err2:
+	free_irq(omap_prcm_event_to_irq("wkup"), NULL);
+err1:
 	return ret;
 }
 
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 9ccaadc..8856253 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -144,7 +144,7 @@
 static int __init omap4_pm_init(void)
 {
 	int ret;
-	struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
+	struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup;
 	struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
 
 	if (!cpu_is_omap44xx())
@@ -168,14 +168,19 @@
 	 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
 	 * expected. The hardware recommendation is to enable static
 	 * dependencies for these to avoid system lock ups or random crashes.
+	 * The L4 wakeup dependency is added to work around the OCP sync
+	 * hardware bug with the 32K synctimer which leads to incorrect timer
+	 * values being read from the 32K counter. The bug applies to GPTIMER1
+	 * and WDT2, which are part of the L4 wakeup clockdomain.
 	 */
 	mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
 	emif_clkdm = clkdm_lookup("l3_emif_clkdm");
 	l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
 	l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
 	l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
+	l4wkup = clkdm_lookup("l4_wkup_clkdm");
 	ducati_clkdm = clkdm_lookup("ducati_clkdm");
-	if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
+	if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) || (!l4wkup) ||
 		(!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm))
 		goto err2;
 
@@ -183,6 +188,7 @@
 	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
 	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
 	ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
+	ret |= clkdm_add_wkdep(mpuss_clkdm, l4wkup);
 	ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
 	ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
 	if (ret) {
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 8a18d1b..96ad3dbe 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -972,7 +972,13 @@
 
 int pwrdm_state_switch(struct powerdomain *pwrdm)
 {
-	return _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW);
+	int ret;
+
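+	/* Wait for any in-progress power state transition to complete */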
+	ret = pwrdm_wait_transition(pwrdm);
+	if (!ret)
+		ret = _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW);
+
+	return ret;
 }
 
 int pwrdm_clkdm_state_switch(struct clockdomain *clkdm)
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index eac623c..f106d21 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -147,8 +147,9 @@
 	u32 mask, st;
 
 	/* XXX read mask from RAM? */
-	mask = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqen_offs);
-	st = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqst_offs);
+	mask = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+				       irqen_offs);
+	st = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqst_offs);
 
 	return mask & st;
 }
@@ -180,7 +181,7 @@
  */
 void omap44xx_prm_ocp_barrier(void)
 {
-	omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+	omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
 				OMAP4_REVISION_PRM_OFFSET);
 }
 
@@ -198,19 +199,19 @@
 void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
 {
 	saved_mask[0] =
-		omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+		omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
 					OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
 	saved_mask[1] =
-		omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+		omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
 					OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
 
-	omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
+	omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
 				 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
-	omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
+	omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
 				 OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
 
 	/* OCP barrier */
-	omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+	omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
 				OMAP4_REVISION_PRM_OFFSET);
 }
 
@@ -226,9 +227,9 @@
  */
 void omap44xx_prm_restore_irqen(u32 *saved_mask)
 {
-	omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_DEVICE_INST,
+	omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_OCP_SOCKET_INST,
 				 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
-	omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_DEVICE_INST,
+	omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_OCP_SOCKET_INST,
 				 OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
 }
 
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 873b51d..d28f848 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -290,7 +290,7 @@
 		goto err;
 	}
 
-	for (i = 0; i <= irq_setup->nr_regs; i++) {
+	for (i = 0; i < irq_setup->nr_regs; i++) {
 		gc = irq_alloc_generic_chip("PRCM", 1,
 			irq_setup->base_irq + i * 32, prm_base,
 			handle_level_irq);
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c
index f51348d..dde8a11 100644
--- a/arch/arm/mach-omap2/usb-host.c
+++ b/arch/arm/mach-omap2/usb-host.c
@@ -54,7 +54,7 @@
 /*
  * setup_ehci_io_mux - initialize IO pad mux for USBHOST
  */
-static void setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode)
+static void __init setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode)
 {
 	switch (port_mode[0]) {
 	case OMAP_EHCI_PORT_MODE_PHY:
@@ -197,7 +197,8 @@
 	return;
 }
 
-static void setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode)
+static
+void __init setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode)
 {
 	switch (port_mode[0]) {
 	case OMAP_EHCI_PORT_MODE_PHY:
@@ -315,7 +316,7 @@
 	}
 }
 
-static void setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
+static void __init setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
 {
 	switch (port_mode[0]) {
 	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
@@ -412,7 +413,8 @@
 	}
 }
 
-static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
+static
+void __init setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
 {
 	switch (port_mode[0]) {
 	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 109ccd2..fe2d1f8 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -113,6 +113,7 @@
 	select IWMMXT
 	select PXA25x
 	select MIGHT_HAVE_PCI
+	select NEED_MACH_IO_H if PCI
 
 config MACH_EM_X270
 	bool "CompuLab EM-x270 platform"
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
new file mode 100644
index 0000000..cd78b7f
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/io.h
@@ -0,0 +1,17 @@
+/*
+ * arch/arm/mach-pxa/include/mach/io.h
+ *
+ * Copied from asm/arch/sa1100/io.h
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+/*
+ * We don't actually have real ISA nor PCI buses, but there are so many
+ * drivers out there that might just work if we fake them...
+ */
+#define __io(a)		__typesafe_io(a)
+
+#endif
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 7d691e5..5905ed1 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -43,6 +43,8 @@
 #include <linux/regulator/consumer.h>
 #include <linux/delay.h>
 
+#include <asm/system_info.h>
+
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
diff --git a/arch/arm/mach-s3c24xx/common.h b/arch/arm/mach-s3c24xx/common.h
new file mode 100644
index 0000000..c2f596e
--- /dev/null
+++ b/arch/arm/mach-s3c24xx/common.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Header for S3C24XX SoCs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S3C24XX_COMMON_H
+#define __ARCH_ARM_MACH_S3C24XX_COMMON_H __FILE__
+
+void s3c2410_restart(char mode, const char *cmd);
+void s3c244x_restart(char mode, const char *cmd);
+
+#endif /* __ARCH_ARM_MACH_S3C24XX_COMMON_H */
diff --git a/arch/arm/mach-s3c24xx/simtec-nor.c b/arch/arm/mach-s3c24xx/simtec-nor.c
index 2119ca6..b9d6d4f 100644
--- a/arch/arm/mach-s3c24xx/simtec-nor.c
+++ b/arch/arm/mach-s3c24xx/simtec-nor.c
@@ -35,9 +35,7 @@
 static void simtec_nor_vpp(struct platform_device *pdev, int vpp)
 {
 	unsigned int val;
-	unsigned long flags;
 
-	local_irq_save(flags);
 	val = __raw_readb(BAST_VA_CTRL3);
 
 	printk(KERN_DEBUG "%s(%d)\n", __func__, vpp);
@@ -48,7 +46,6 @@
 		val &= ~BAST_CPLD_CTRL3_ROMWEN;
 
 	__raw_writeb(val, BAST_VA_CTRL3);
-	local_irq_restore(flags);
 }
 
 static struct physmap_flash_data simtec_nor_pdata = {
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 48885b7..c7f418b 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -313,6 +313,10 @@
 
 	.lccr0		= LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
 	.lccr3		= LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
+
+#ifdef CONFIG_BACKLIGHT_LOCOMO
+	.lcd_power	= locomolcd_power
+#endif
 };
 
 static void __init collie_init(void)
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index 52acda7..f33679d 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -1,7 +1,7 @@
 /*
  * arch/arm/mach-sa1100/include/mach/collie.h
  *
- * This file contains the hardware specific definitions for Assabet
+ * This file contains the hardware specific definitions for Collie
  * Only include this file from SA1100-specific files.
  *
  * ChangeLog:
@@ -13,6 +13,7 @@
 #ifndef __ASM_ARCH_COLLIE_H
 #define __ASM_ARCH_COLLIE_H
 
+extern void locomolcd_power(int on);
 
 #define COLLIE_SCOOP_GPIO_BASE	(GPIO_MAX + 1)
 #define COLLIE_GPIO_CHARGE_ON	(COLLIE_SCOOP_GPIO_BASE + 0)
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index 6a2a7f2..2704bcd 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -15,6 +15,7 @@
 #include <asm/mach-types.h>
 #include <asm/leds.h>
 #include <asm/param.h>
+#include <asm/system_misc.h>
 
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index a125d4e..f49e28a 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,6 +39,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
+#include <linux/mtd/sh_flctl.h>
 #include <linux/pm_clock.h>
 #include <linux/smsc911x.h>
 #include <linux/sh_intc.h>
@@ -956,6 +957,50 @@
 	},
 };
 
+/* FLCTL */
+static struct mtd_partition nand_partition_info[] = {
+	{
+		.name	= "system",
+		.offset	= 0,
+		.size	= 128 * 1024 * 1024,
+	},
+	{
+		.name	= "userdata",
+		.offset	= MTDPART_OFS_APPEND,
+		.size	= 256 * 1024 * 1024,
+	},
+	{
+		.name	= "cache",
+		.offset	= MTDPART_OFS_APPEND,
+		.size	= 128 * 1024 * 1024,
+	},
+};
+
+static struct resource nand_flash_resources[] = {
+	[0] = {
+		.start	= 0xe6a30000,
+		.end	= 0xe6a3009b,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct sh_flctl_platform_data nand_flash_data = {
+	.parts		= nand_partition_info,
+	.nr_parts	= ARRAY_SIZE(nand_partition_info),
+	.flcmncr_val	= CLK_16B_12L_4H | TYPESEL_SET
+			| SHBUSSEL | SEL_16BIT | SNAND_E,
+	.use_holden	= 1,
+};
+
+static struct platform_device nand_flash_device = {
+	.name		= "sh_flctl",
+	.resource	= nand_flash_resources,
+	.num_resources	= ARRAY_SIZE(nand_flash_resources),
+	.dev		= {
+		.platform_data = &nand_flash_data,
+	},
+};
+
 /*
  * The card detect pin of the top SD/MMC slot (CN7) is active low and is
  * connected to GPIO A22 of SH7372 (GPIO_PORT41).
@@ -1259,6 +1304,7 @@
 	&fsi_device,
 	&fsi_ak4643_device,
 	&fsi_hdmi_device,
+	&nand_flash_device,
 	&sdhi0_device,
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	&sdhi1_device,
@@ -1488,6 +1534,30 @@
 	gpio_request(GPIO_FN_MMCCMD0, NULL);
 	gpio_request(GPIO_FN_MMCCLK0, NULL);
 
+	/* FLCTL */
+	gpio_request(GPIO_FN_D0_NAF0, NULL);
+	gpio_request(GPIO_FN_D1_NAF1, NULL);
+	gpio_request(GPIO_FN_D2_NAF2, NULL);
+	gpio_request(GPIO_FN_D3_NAF3, NULL);
+	gpio_request(GPIO_FN_D4_NAF4, NULL);
+	gpio_request(GPIO_FN_D5_NAF5, NULL);
+	gpio_request(GPIO_FN_D6_NAF6, NULL);
+	gpio_request(GPIO_FN_D7_NAF7, NULL);
+	gpio_request(GPIO_FN_D8_NAF8, NULL);
+	gpio_request(GPIO_FN_D9_NAF9, NULL);
+	gpio_request(GPIO_FN_D10_NAF10, NULL);
+	gpio_request(GPIO_FN_D11_NAF11, NULL);
+	gpio_request(GPIO_FN_D12_NAF12, NULL);
+	gpio_request(GPIO_FN_D13_NAF13, NULL);
+	gpio_request(GPIO_FN_D14_NAF14, NULL);
+	gpio_request(GPIO_FN_D15_NAF15, NULL);
+	gpio_request(GPIO_FN_FCE0, NULL);
+	gpio_request(GPIO_FN_WE0_FWE, NULL);
+	gpio_request(GPIO_FN_FRB, NULL);
+	gpio_request(GPIO_FN_A4_FOE, NULL);
+	gpio_request(GPIO_FN_A5_FCDE, NULL);
+	gpio_request(GPIO_FN_RD_FSC, NULL);
+
 	/* enable GPS module (GT-720F) */
 	gpio_request(GPIO_FN_SCIFA2_TXD1, NULL);
 	gpio_request(GPIO_FN_SCIFA2_RXD1, NULL);
@@ -1532,6 +1602,7 @@
 	sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
 	sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device);
 	sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &nand_flash_device);
 	sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
 	sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index de243e3..94d1f88 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -511,7 +511,7 @@
        MSTP223,
        MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
        MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-       MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
+	MSTP328, MSTP323, MSTP322, MSTP315, MSTP314, MSTP313, MSTP312,
        MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
        MSTP405, MSTP404, MSTP403, MSTP400,
        MSTP_NR };
@@ -553,6 +553,7 @@
 	[MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
 	[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
 	[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
+	[MSTP315] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 15, 0), /* FLCTL*/
 	[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
 	[MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */
 	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */
@@ -653,6 +654,7 @@
 	CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
 	CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
 	CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */
+	CLKDEV_DEV_ID("sh_flctl.0", &mstp_clks[MSTP315]), /* FLCTL */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
 	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
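
The new CLKDEV_DEV_ID("sh_flctl.0", ...) entry lets the FLCTL driver find its MSTP gate clock by device name alone. A hedged sketch of the consumer side, assuming a platform device named "sh_flctl" with instance 0 (probe function name and error handling are illustrative):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* dev_name(&pdev->dev) is "sh_flctl.0", so clk_get(dev, NULL) matches
	 * the CLKDEV_DEV_ID() entry above and returns the MSTP315 gate clock. */
	static int flctl_probe_sketch(struct platform_device *pdev)
	{
		struct clk *clk;

		clk = clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_enable(clk);	/* ungate MSTP315 so the FLCTL block is clocked */
		/* ... set up the controller ... */
		return 0;
	}
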
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 21b09b6..7e65591 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -13,6 +13,7 @@
 #include <linux/suspend.h>
 #include <linux/module.h>
 #include <linux/err.h>
+#include <asm/cpuidle.h>
 #include <asm/io.h>
 
 static void shmobile_enter_wfi(void)
@@ -28,37 +29,19 @@
 				  struct cpuidle_driver *drv,
 				  int index)
 {
-	ktime_t before, after;
-
-	before = ktime_get();
-
-	local_irq_disable();
-	local_fiq_disable();
-
 	shmobile_cpuidle_modes[index]();
 
-	local_irq_enable();
-	local_fiq_enable();
-
-	after = ktime_get();
-	dev->last_residency = ktime_to_ns(ktime_sub(after, before)) >> 10;
-
 	return index;
 }
 
 static struct cpuidle_device shmobile_cpuidle_dev;
 static struct cpuidle_driver shmobile_cpuidle_driver = {
-	.name =		"shmobile_cpuidle",
-	.owner =	THIS_MODULE,
-	.states[0] = {
-		.name = "C1",
-		.desc = "WFI",
-		.exit_latency = 1,
-		.target_residency = 1 * 2,
-		.flags = CPUIDLE_FLAG_TIME_VALID,
-	},
-	.safe_state_index = 0, /* C1 */
-	.state_count = 1,
+	.name			= "shmobile_cpuidle",
+	.owner			= THIS_MODULE,
+	.en_core_tk_irqen	= 1,
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.safe_state_index	= 0, /* C1 */
+	.state_count		= 1,
 };
 
 void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
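
Setting .en_core_tk_irqen = 1 tells the cpuidle core to handle interrupt disabling and residency timekeeping itself, which is why the ktime_get()/local_irq_disable() bookkeeping could be deleted from the enter hook, and ARM_CPUIDLE_WFI_STATE supplies a standard C1/WFI state description. A rough sketch of the accounting the driver used to do by hand (function pointers are placeholders; the ">> 10" mirrors the deleted code, which divided nanoseconds by 1024 rather than 1000):

	#include <stdint.h>

	static int enter_idle_manually_timed(void (*do_idle)(void),
					     int64_t (*now_ns)(void),
					     int *last_residency_us)
	{
		int64_t before = now_ns();

		do_idle();				/* e.g. execute WFI */

		*last_residency_us = (int)((now_ns() - before) >> 10);
		return 0;
	}
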
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 8b90c44..1621ad0 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1544,6 +1544,8 @@
 	.nr_partitions = ARRAY_SIZE(u300_partitions),
 	.options = NAND_SKIP_BBTSCAN,
 	.width = FSMC_NAND_BW8,
+	.ale_off = PLAT_NAND_ALE,
+	.cle_off = PLAT_NAND_CLE,
 };
 
 static struct platform_device nand_device = {
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 7b7cba9..65f87c5 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -24,6 +24,11 @@
 /* NFIF */
 #define U300_NAND_IF_PHYS_BASE		0x9f800000
 
+/* ALE, CLE offset for FSMC NAND */
+#define PLAT_NAND_CLE			(1 << 16)
+#define PLAT_NAND_ALE			(1 << 17)
+
+
 /* AHB Peripherals */
 #define U300_AHB_PER_PHYS_BASE		0xa0000000
 #define U300_AHB_PER_VIRT_BASE		0xff010000
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index a6e23f4..d2268be8 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -190,7 +190,7 @@
 	.flags	= IORESOURCE_MEM | IORESOURCE_PREFETCH,
 };
 
-static int __init pci_versatile_setup_resources(struct list_head *resources)
+static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
 {
 	int ret = 0;
 
@@ -218,9 +218,9 @@
 	 * the mem resource for this bus
 	 * the prefetch mem resource for this bus
 	 */
-	pci_add_resource_offset(resources, &io_mem, sys->io_offset);
-	pci_add_resource_offset(resources, &non_mem, sys->mem_offset);
-	pci_add_resource_offset(resources, &pre_mem, sys->mem_offset);
+	pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset);
+	pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset);
+	pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset);
 
 	goto out;
 
@@ -249,7 +249,7 @@
 
 	if (nr == 0) {
 		sys->mem_offset = 0;
-		ret = pci_versatile_setup_resources(&sys->resources);
+		ret = pci_versatile_setup_resources(sys);
 		if (ret < 0) {
 			printk("pci_versatile_setup: resources... oops?\n");
 			goto out;
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index db82568..48f5b9f 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -27,6 +27,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 
+#include <asm/system_misc.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
diff --git a/arch/arm/plat-mxc/3ds_debugboard.c b/arch/arm/plat-mxc/3ds_debugboard.c
index d1e31fa..5cac2c5 100644
--- a/arch/arm/plat-mxc/3ds_debugboard.c
+++ b/arch/arm/plat-mxc/3ds_debugboard.c
@@ -80,7 +80,7 @@
 
 static struct platform_device smsc_lan9217_device = {
 	.name = "smsc911x",
-	.id = 0,
+	.id = -1,
 	.dev = {
 		.platform_data = &smsc911x_config,
 	},
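
Changing the platform_device id from 0 to -1 registers the Ethernet controller as the sole instance, so its device name becomes "smsc911x" rather than "smsc911x.0", which is what name-based lookups on this board expect. A hedged illustration of the naming rule (structure name invented):

	#include <linux/platform_device.h>

	/* id >= 0  -> the device is named "<name>.<id>", e.g. "smsc911x.0";
	 * id == -1 -> the device is the only instance and is named "smsc911x". */
	static struct platform_device eth_dev_sketch = {
		.name	= "smsc911x",
		.id	= -1,
	};
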
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index ce1e9b9..ad95c7a 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -17,6 +17,7 @@
 	select IRQ_DOMAIN
 	select HAVE_IDE
 	select NEED_MACH_MEMORY_H
+	select NEED_MACH_IO_H if PCCARD
 	help
 	  "Systems based on omap7xx, omap15xx or omap16xx"
 
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 56b6f8b..8506cbb 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -441,6 +441,8 @@
 		return 0;
 
 	pr_info("clock: disabling unused clocks to save power\n");
+
+	spin_lock_irqsave(&clockfw_lock, flags);
 	list_for_each_entry(ck, &clocks, node) {
 		if (ck->ops == &clkops_null)
 			continue;
@@ -448,10 +450,9 @@
 		if (ck->usecount > 0 || !ck->enable_reg)
 			continue;
 
-		spin_lock_irqsave(&clockfw_lock, flags);
 		arch_clock->clk_disable_unused(ck);
-		spin_unlock_irqrestore(&clockfw_lock, flags);
 	}
+	spin_unlock_irqrestore(&clockfw_lock, flags);
 
 	return 0;
 }
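
Taking clockfw_lock once around the whole traversal, instead of per matching clock, keeps the clocks list stable while list_for_each_entry() walks it and avoids repeated lock/unlock in this boot-time path. A minimal sketch of the pattern with placeholder names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct clk_sketch {
		struct list_head	node;
		int			usecount;
	};

	static LIST_HEAD(clocks_sketch);
	static DEFINE_SPINLOCK(clocks_lock_sketch);

	static void disable_unused_sketch(void)
	{
		struct clk_sketch *ck;
		unsigned long flags;

		/* One lock acquisition covers the whole traversal, so the list
		 * cannot change under list_for_each_entry(). */
		spin_lock_irqsave(&clocks_lock_sketch, flags);
		list_for_each_entry(ck, &clocks_sketch, node) {
			if (ck->usecount > 0)
				continue;
			/* gate the unused clock here, still under the lock */
		}
		spin_unlock_irqrestore(&clocks_lock_sketch, flags);
	}
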
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 9e8e63d..8070145 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -47,17 +47,17 @@
  * with the original PRCM protocol defined for OMAP2420
  */
 #define SYSC_TYPE1_MIDLEMODE_SHIFT	12
-#define SYSC_TYPE1_MIDLEMODE_MASK	(0x3 << SYSC_MIDLEMODE_SHIFT)
+#define SYSC_TYPE1_MIDLEMODE_MASK	(0x3 << SYSC_TYPE1_MIDLEMODE_SHIFT)
 #define SYSC_TYPE1_CLOCKACTIVITY_SHIFT	8
-#define SYSC_TYPE1_CLOCKACTIVITY_MASK	(0x3 << SYSC_CLOCKACTIVITY_SHIFT)
+#define SYSC_TYPE1_CLOCKACTIVITY_MASK	(0x3 << SYSC_TYPE1_CLOCKACTIVITY_SHIFT)
 #define SYSC_TYPE1_SIDLEMODE_SHIFT	3
-#define SYSC_TYPE1_SIDLEMODE_MASK	(0x3 << SYSC_SIDLEMODE_SHIFT)
+#define SYSC_TYPE1_SIDLEMODE_MASK	(0x3 << SYSC_TYPE1_SIDLEMODE_SHIFT)
 #define SYSC_TYPE1_ENAWAKEUP_SHIFT	2
-#define SYSC_TYPE1_ENAWAKEUP_MASK	(1 << SYSC_ENAWAKEUP_SHIFT)
+#define SYSC_TYPE1_ENAWAKEUP_MASK	(1 << SYSC_TYPE1_ENAWAKEUP_SHIFT)
 #define SYSC_TYPE1_SOFTRESET_SHIFT	1
-#define SYSC_TYPE1_SOFTRESET_MASK	(1 << SYSC_SOFTRESET_SHIFT)
+#define SYSC_TYPE1_SOFTRESET_MASK	(1 << SYSC_TYPE1_SOFTRESET_SHIFT)
 #define SYSC_TYPE1_AUTOIDLE_SHIFT	0
-#define SYSC_TYPE1_AUTOIDLE_MASK	(1 << SYSC_AUTOIDLE_SHIFT)
+#define SYSC_TYPE1_AUTOIDLE_MASK	(1 << SYSC_TYPE1_AUTOIDLE_SHIFT)
 
 /*
  * OCP SYSCONFIG bit shifts/masks TYPE2. These are for IPs compliant
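
The masks above were being built from the old SYSC_*_SHIFT names instead of the matching SYSC_TYPE1_*_SHIFT ones, so each mask could end up shifted by the wrong amount. The fixed idiom pairs every mask with its own shift; a hedged example of using such a pair (field width and names are invented):

	#include <stdint.h>

	#define FIELD_SHIFT	12
	#define FIELD_MASK	(0x3 << FIELD_SHIFT)	/* mask built from its own shift */

	/* Insert a 2-bit field value into a register image. */
	static uint32_t set_field(uint32_t reg, uint32_t val)
	{
		reg &= ~FIELD_MASK;				/* clear the field */
		reg |= (val << FIELD_SHIFT) & FIELD_MASK;	/* insert the new value */
		return reg;
	}
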
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 0db73ae..290942d 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -36,6 +36,7 @@
 #include <asm/irq.h>
 #include <asm/cacheflush.h>
 #include <asm/system_info.h>
+#include <asm/system_misc.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/avr32/boot/images/Makefile b/arch/avr32/boot/images/Makefile
index 1848bf0..2a3b539 100644
--- a/arch/avr32/boot/images/Makefile
+++ b/arch/avr32/boot/images/Makefile
@@ -6,8 +6,6 @@
 # for more details.
 #
 
-MKIMAGE		:= $(srctree)/scripts/mkuboot.sh
-
 extra-y		:= vmlinux.bin vmlinux.gz
 
 OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note.gnu.build-id
@@ -17,10 +15,9 @@
 $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
-quiet_cmd_uimage = UIMAGE $@
-      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A avr32 -O linux -T kernel	\
-		-C gzip -a $(CONFIG_LOAD_ADDRESS) -e $(CONFIG_ENTRY_ADDRESS)	\
-		-n 'Linux-$(KERNELRELEASE)' -d $< $@
+UIMAGE_LOADADDR = $(CONFIG_LOAD_ADDRESS)
+UIMAGE_ENTRYADDR = $(CONFIG_ENTRY_ADDRESS)
+UIMAGE_COMPRESSION = gzip
 
 targets += uImage uImage.srec
 $(obj)/uImage: $(obj)/vmlinux.gz
diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
index 808001c..0961275 100644
--- a/arch/avr32/include/asm/barrier.h
+++ b/arch/avr32/include/asm/barrier.h
@@ -8,6 +8,8 @@
 #ifndef __ASM_AVR32_BARRIER_H
 #define __ASM_AVR32_BARRIER_H
 
+#define nop()			asm volatile("nop")
+
 #define mb()			asm volatile("" : : : "memory")
 #define rmb()			mb()
 #define wmb()			asm volatile("sync 0" : : : "memory")
diff --git a/arch/avr32/include/asm/special_insns.h b/arch/avr32/include/asm/special_insns.h
deleted file mode 100644
index f922218..0000000
--- a/arch/avr32/include/asm/special_insns.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_SPECIAL_INSNS_H
-#define __ASM_AVR32_SPECIAL_INSNS_H
-
-#define nop() asm volatile("nop")
-
-#endif /* __ASM_AVR32_SPECIAL_INSNS_H */
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 7173386..70742ec9 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -7,7 +7,7 @@
 #include <linux/types.h>
 #include <linux/serial.h>
 #include <linux/platform_data/macb.h>
-#include <linux/platform_data/atmel_nand.h>
+#include <linux/platform_data/atmel.h>
 
 #define GPIO_PIN_NONE	(-1)
 
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index c1269a1..373a690 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -823,7 +823,7 @@
 	bool "Locate cacheline_aligned data to L1 Data Memory"
 	default y if !BF54x
 	default n if BF54x
-	depends on !SMP && !BF531
+	depends on !SMP && !BF531 && !CRC32
 	help
 	  If enabled, cacheline_aligned data is linked
 	  into L1 data memory. (less latency)
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 0a49279..f7d27d5 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -6,20 +6,17 @@
 # for more details.
 #
 
-MKIMAGE := $(srctree)/scripts/mkuboot.sh
-
 targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip
 
-UIMAGE_OPTS-y :=
-UIMAGE_OPTS-$(CONFIG_RAMKERNEL) += -a $(CONFIG_BOOT_LOAD)
-UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -a $(CONFIG_ROM_BASE) -x
-
-quiet_cmd_uimage = UIMAGE  $@
-      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
-                   -C $(2) -n '$(CPU_REV)-$(KERNELRELEASE)' \
-                   -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
-                   $(UIMAGE_OPTS-y) -d $< $@
+ifeq ($(CONFIG_RAMKERNEL),y)
+UIMAGE_LOADADDR = $(CONFIG_BOOT_LOAD)
+else # CONFIG_ROMKERNEL must be set
+UIMAGE_LOADADDR = $(CONFIG_ROM_BASE)
+endif
+UIMAGE_ENTRYADDR = $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}')
+UIMAGE_NAME = '$(CPU_REV)-$(KERNELRELEASE)'
+UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -x
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 9ccc18a..90b1753 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -147,6 +147,7 @@
 CONFIG_USB_MON=y
 CONFIG_USB_MUSB_HDRC=y
 CONFIG_USB_MUSB_BLACKFIN=y
+CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_GADGET=y
 CONFIG_RTC_CLASS=y
diff --git a/arch/blackfin/include/asm/cmpxchg.h b/arch/blackfin/include/asm/cmpxchg.h
index ba2484f..c05868c 100644
--- a/arch/blackfin/include/asm/cmpxchg.h
+++ b/arch/blackfin/include/asm/cmpxchg.h
@@ -122,7 +122,8 @@
 			(unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-#include <asm-generic/cmpxchg.h>
+#define cmpxchg(ptr, o, n)	cmpxchg_local((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
 
 #endif /* !CONFIG_SMP */
 
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 5a25856..12d3571 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -244,16 +244,26 @@
 	return -EINVAL;
 }
 
-static inline int gpio_get_value(unsigned gpio)
+static inline int __gpio_get_value(unsigned gpio)
 {
 	return bfin_gpio_get_value(gpio);
 }
 
-static inline void gpio_set_value(unsigned gpio, int value)
+static inline void __gpio_set_value(unsigned gpio, int value)
 {
 	return bfin_gpio_set_value(gpio, value);
 }
 
+static inline int gpio_get_value(unsigned gpio)
+{
+	return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+	return __gpio_set_value(gpio, value);
+}
+
 static inline int gpio_to_irq(unsigned gpio)
 {
 	if (likely(gpio < MAX_BLACKFIN_GPIOS))
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 2aa0193..2ad747e 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -550,6 +550,7 @@
 {
 #ifdef CONFIG_MTD_UCLINUX
 	unsigned long mtd_phys = 0;
+	unsigned long n;
 #endif
 	unsigned long max_mem;
 
@@ -593,9 +594,9 @@
 	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
 
 # if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
-	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
-		mtd_size =
-		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
+	n = ext2_image_size((void *)(mtd_phys + 0x400));
+	if (n)
+		mtd_size = PAGE_ALIGN(n * 1024);
 # endif
 
 # if defined(CONFIG_CRAMFS)
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
index f13b78d..ab4577f 100644
--- a/arch/c6x/include/asm/irq.h
+++ b/arch/c6x/include/asm/irq.h
@@ -42,10 +42,6 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ		0
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
-extern irq_hw_number_t virq_to_hw(unsigned int virq);
-
 extern void __init init_pic_c64xplus(void);
 
 extern void init_IRQ(void);
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
index 65b8ddf..c90fb5e 100644
--- a/arch/c6x/kernel/irq.c
+++ b/arch/c6x/kernel/irq.c
@@ -130,16 +130,3 @@
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
-
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
-irq_hw_number_t virq_to_hw(unsigned int virq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
-}
-EXPORT_SYMBOL_GPL(virq_to_hw);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index 304f675..3b5a050 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -85,10 +85,7 @@
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
@@ -279,15 +276,8 @@
 
 	/* Set up the stack frame */
 	ret = setup_rt_frame(sig, ka, info, oldset, regs);
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked, &current->blocked,
-			  &ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked, sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	if (ret == 0)
+		block_sigmask(ka, sig);
 
 	return ret;
 }
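
Both hunks replace open-coded sighand locking with helpers the core signal code gained around this time: set_current_blocked() updates the blocked mask with the proper locking and recalc_sigpending(), and block_sigmask() applies ka->sa.sa_mask plus the delivered signal unless SA_NODEFER is set. Roughly, block_sigmask() behaves like this sketch (not the exact kernel implementation):

	#include <linux/sched.h>
	#include <linux/signal.h>

	static void block_sigmask_sketch(struct k_sigaction *ka, int sig)
	{
		sigset_t blocked;

		sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&blocked, sig);
		set_current_blocked(&blocked);	/* takes siglock, recalcs pending */
	}
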
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 41098a3..4f8d8bc 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -13,6 +13,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/pci.h>
+#include <linux/export.h>
 #include <linux/highmem.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 448b224..233ed3d 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -71,29 +71,35 @@
 	return (dma_addr == bad_dma_address);
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
 
-	ret = ops->alloc_coherent(dev, size, dma_handle, flag);
+	ret = ops->alloc(dev, size, dma_handle, flag, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
 	return ret;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 }
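
This is the recurring theme of several architecture updates in this merge: dma_map_ops lost its alloc_coherent/free_coherent hooks in favour of alloc/free that also take a struct dma_attrs *, and dma_alloc_coherent()/dma_free_coherent() become thin wrappers that pass NULL attrs. From a driver's point of view nothing changes; a hedged usage sketch (function and buffer names invented):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* dma_alloc_coherent() now expands to dma_alloc_attrs(dev, size, handle,
	 * gfp, NULL), which calls the arch's ops->alloc() with NULL attrs. */
	static int setup_ring_sketch(struct device *dev)
	{
		dma_addr_t ring_dma;
		void *ring;

		ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* ... hand ring_dma to the hardware ... */

		dma_free_coherent(dev, 4096, ring, ring_dma);
		return 0;
	}
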
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index e711ace..3730221 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -54,7 +54,8 @@
 /* Allocates from a pool of uncached memory that was reserved at boot time */
 
 void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -81,7 +82,7 @@
 }
 
 static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
-				  dma_addr_t dma_addr)
+				  dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
@@ -202,8 +203,8 @@
 }
 
 struct dma_map_ops hexagon_dma_ops = {
-	.alloc_coherent	= hexagon_dma_alloc_coherent,
-	.free_coherent	= hexagon_free_coherent,
+	.alloc		= hexagon_dma_alloc_coherent,
+	.free		= hexagon_free_coherent,
 	.map_sg		= hexagon_map_sg,
 	.map_page	= hexagon_map_page,
 	.sync_single_for_cpu = hexagon_sync_single_for_cpu,
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 15d1fd2..9b44a9e 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -35,7 +35,7 @@
 #define BASE_IPI_IRQ 26
 
 /*
- * cpu_possible_map needs to be filled out prior to setup_per_cpu_areas
+ * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
  * (which is prior to any of our smp_prepare_cpu crap), in order to set
  * up the...  per_cpu areas.
  */
@@ -208,7 +208,7 @@
 	stack_start =  ((void *) thread) + THREAD_SIZE;
 	__vmstart(start_secondary, stack_start);
 
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		barrier();
 
 	return 0;
@@ -229,7 +229,7 @@
 
 	/*  Right now, let's just fake it. */
 	for (i = 0; i < max_cpus; i++)
-		cpu_set(i, cpu_present_map);
+		set_cpu_present(i, true);
 
 	/*  Also need to register the interrupts for IPI  */
 	if (max_cpus > 1)
@@ -269,5 +269,5 @@
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 }
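
The old code poked cpu_possible_map/cpu_online_map directly with cpu_set()/cpu_isset(); those global cpumask_t variables were being retired in favour of accessor functions over the read-mostly masks. A hedged summary of the new idiom (function name invented):

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	static void mark_cpus_sketch(int max_cpus)
	{
		int i;

		for (i = 0; i < max_cpus; i++)
			set_cpu_present(i, true);	/* was: cpu_set(i, cpu_present_map) */

		for_each_online_cpu(i)			/* was: cpu_isset(i, cpu_online_map) tests */
			pr_info("cpu%d is online\n", i);
	}
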
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index f6ea3a3..bcda5b2 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1129,7 +1129,8 @@
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void *
-sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
+sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t flags, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	void *addr;
@@ -1191,8 +1192,8 @@
  *
  * See Documentation/DMA-API-HOWTO.txt
  */
-static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
-			       dma_addr_t dma_handle)
+static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
+			      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -2212,8 +2213,8 @@
 __setup("sbapagesize=",sba_page_override);
 
 struct dma_map_ops sba_dma_ops = {
-	.alloc_coherent		= sba_alloc_coherent,
-	.free_coherent		= sba_free_coherent,
+	.alloc			= sba_alloc_coherent,
+	.free			= sba_free_coherent,
 	.map_page		= sba_map_page,
 	.unmap_page		= sba_unmap_page,
 	.map_sg			= sba_map_sg_attrs,
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
index 507c66c..4c96187 100644
--- a/arch/ia64/include/asm/cmpxchg.h
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -1 +1 @@
-/* Future home of xchg() and cmpxchg() */
+#include <asm/intrinsics.h>
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 4336d08..4f5e814 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -23,23 +23,29 @@
 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 				enum dma_data_direction);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *daddr, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *daddr, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	void *caddr;
 
-	caddr = ops->alloc_coherent(dev, size, daddr, gfp);
+	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
 	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
 	return caddr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *caddr, dma_addr_t daddr)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *caddr, dma_addr_t daddr,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	debug_dma_free_coherent(dev, size, caddr, daddr);
-	ops->free_coherent(dev, size, caddr, daddr);
+	ops->free(dev, size, caddr, daddr, attrs);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index ac795d3..6f38b61 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -839,7 +839,7 @@
 early_param("additional_cpus", setup_additional_cpus);
 
 /*
- * cpu_possible_map should be static, it cannot change as CPUs
+ * cpu_possible_mask should be static, it cannot change as CPUs
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index d9485d9..939260a 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -15,16 +15,24 @@
 EXPORT_SYMBOL(swiotlb);
 
 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
-					 dma_addr_t *dma_handle, gfp_t gfp)
+					 dma_addr_t *dma_handle, gfp_t gfp,
+					 struct dma_attrs *attrs)
 {
 	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
 		gfp |= GFP_DMA;
 	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 }
 
+static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_addr,
+				       struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc_coherent = ia64_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = ia64_swiotlb_alloc_coherent,
+	.free = ia64_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
 	.map_sg = swiotlb_map_sg_attrs,
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index a9d310d..3290d6e 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -76,7 +76,8 @@
  * more information.
  */
 static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t * dma_handle, gfp_t flags)
+				   dma_addr_t * dma_handle, gfp_t flags,
+				   struct dma_attrs *attrs)
 {
 	void *cpuaddr;
 	unsigned long phys_addr;
@@ -137,7 +138,7 @@
  * any associated IOMMU mappings.
  */
 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-				 dma_addr_t dma_handle)
+				 dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -466,8 +467,8 @@
 }
 
 static struct dma_map_ops sn_dma_ops = {
-	.alloc_coherent		= sn_dma_alloc_coherent,
-	.free_coherent		= sn_dma_free_coherent,
+	.alloc			= sn_dma_alloc_coherent,
+	.free			= sn_dma_free_coherent,
 	.map_page		= sn_dma_map_page,
 	.unmap_page		= sn_dma_unmap_page,
 	.map_sg			= sn_dma_map_sg,
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 336e617..f4e32de 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/irqflags.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 96fa6ed..d9f62e0 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -980,6 +980,9 @@
 {
 	u8 *swim_base;
 
+	if (!MACH_IS_MAC)
+		return -ENODEV;
+
 	/*
 	 * Serial devices
 	 */
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 512adb6..8a1ce32 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -334,6 +334,9 @@
 {
 	struct platform_device *pdev;
 
+	if (!MACH_IS_Q40)
+		return -ENODEV;
+
 	pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
 	if (IS_ERR(pdev))
 		return PTR_ERR(pdev);
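
The MACH_IS_MAC and MACH_IS_Q40 guards stop these initcalls from registering platform devices when a multi-machine m68k kernel boots on different hardware; an initcall runs regardless of which machine was detected, so it must opt out itself. A hedged sketch of the pattern (the machine check, device name, and initcall level here are illustrative):

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/platform_device.h>

	/* mach_is_this_board() stands in for MACH_IS_MAC / MACH_IS_Q40. */
	static int mach_is_this_board(void) { return 1; }

	static int __init board_devices_init(void)
	{
		struct platform_device *pdev;

		/* Initcalls run on every machine the kernel was built for,
		 * so board-specific ones must bail out early. */
		if (!mach_is_this_board())
			return -ENODEV;

		pdev = platform_device_register_simple("board-dev", -1, NULL, 0);
		return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
	}
	arch_initcall(board_devices_init);
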
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 34940c8..fa83ea4 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -2,8 +2,6 @@
 # arch/microblaze/boot/Makefile
 #
 
-MKIMAGE := $(srctree)/scripts/mkuboot.sh
-
 obj-y += linked_dtb.o
 
 targets := linux.bin linux.bin.gz simpleImage.%
@@ -35,11 +33,9 @@
 	cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
 				-K _fdt_start vmlinux -o $@
 
-quiet_cmd_uimage = UIMAGE  $@.ub
-	cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \
-		-C none -n 'Linux-$(KERNELRELEASE)' \
-		-a $(CONFIG_KERNEL_BASE_ADDR) -e $(CONFIG_KERNEL_BASE_ADDR) \
-		-d $@ $@.ub
+UIMAGE_IN = $@
+UIMAGE_OUT = $@.ub
+UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
 
 $(obj)/simpleImage.%: vmlinux FORCE
 	$(call if_changed,cp,.unstrip)
diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h
index 0094859a..538afc0 100644
--- a/arch/microblaze/include/asm/cmpxchg.h
+++ b/arch/microblaze/include/asm/cmpxchg.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_MICROBLAZE_CMPXCHG_H
 #define _ASM_MICROBLAZE_CMPXCHG_H
 
+#include <linux/irqflags.h>
+
 void __bad_xchg(volatile void *ptr, int size);
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 3a3e5b8..01d2282 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -123,28 +123,34 @@
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	BUG_ON(!ops);
 
-	memory = ops->alloc_coherent(dev, size, dma_handle, flag);
+	memory = ops->alloc(dev, size, dma_handle, flag, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!ops);
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index b0526d2..ff8cde1 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -24,7 +24,7 @@
 			.word	1b,4b,2b,4b;				\
 			.previous;"					\
 	: "=&r" (oldval), "=&r" (ret)					\
-	: "b" (uaddr), "i" (-EFAULT), "r" (oparg)			\
+	: "r" (uaddr), "i" (-EFAULT), "r" (oparg)			\
 	);								\
 })
 
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 510a8e1..bffb545 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -31,6 +31,8 @@
 /* Do necessary setup to start up a newly executed thread. */
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);
 
+extern void ret_from_fork(void);
+
 # endif /* __ASSEMBLY__ */
 
 # ifndef CONFIG_MMU
@@ -143,8 +145,6 @@
 
 unsigned long get_wchan(struct task_struct *p);
 
-extern void ret_from_fork(void);
-
 /* The size allocated for kernel stacks. This _must_ be a power of two! */
 # define KERNEL_STACK_SIZE	0x2000
 
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 65a4af4..a2bfa2c 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -33,7 +33,8 @@
 #define NOT_COHERENT_CACHE
 
 static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag)
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
 {
 #ifdef NOT_COHERENT_CACHE
 	return consistent_alloc(flag, size, dma_handle);
@@ -57,7 +58,8 @@
 }
 
 static void dma_direct_free_coherent(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle)
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
 {
 #ifdef NOT_COHERENT_CACHE
 	consistent_free(size, vaddr);
@@ -176,8 +178,8 @@
 }
 
 struct dma_map_ops dma_direct_ops = {
-	.alloc_coherent	= dma_direct_alloc_coherent,
-	.free_coherent	= dma_direct_free_coherent,
+	.alloc		= dma_direct_alloc_coherent,
+	.free		= dma_direct_free_coherent,
 	.map_sg		= dma_direct_map_sg,
 	.unmap_sg	= dma_direct_unmap_sg,
 	.dma_supported	= dma_direct_dma_supported,
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index ec48587..aba1f9a9 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -176,6 +176,7 @@
 	base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
 	printk(KERN_CONT "0x%x\n", base_addr);
 
+#ifdef CONFIG_MMU
 	/*
 	 * Early console is on the top of skipped TLB entries
 	 * decrease tlb_skip size ensure that hardcoded TLB entry will be
@@ -189,6 +190,7 @@
 	 *  cmp rX, orig_base_addr
 	 */
 	tlb_skip -= 1;
+#endif
 }
 
 void __init disable_early_printk(void)
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 71af974..16d8dfd 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -206,6 +206,7 @@
 }
 arch_initcall(microblaze_debugfs_init);
 
+# ifdef CONFIG_MMU
 static int __init debugfs_tlb(void)
 {
 	struct dentry *d;
@@ -218,6 +219,7 @@
 		return -ENOMEM;
 }
 device_initcall(debugfs_tlb);
+# endif
 #endif
 
 static int dflt_bus_notify(struct notifier_block *nb,
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 9781a52..6be4ae3 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -24,6 +24,7 @@
 #include <asm/sections.h>
 #include <asm/exceptions.h>
 #include <asm/unwind.h>
+#include <asm/switch_to.h>
 
 struct stack_trace;
 
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index f037266..f085995 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -122,22 +122,22 @@
 15:	swi	r24, r5, 0x0018 + offset;	\
 16:	swi	r25, r5, 0x001C + offset;	\
 	.section __ex_table,"a";		\
-	.word	1b, 0f;				\
-	.word	2b, 0f;				\
-	.word	3b, 0f;				\
-	.word	4b, 0f;				\
-	.word	5b, 0f;				\
-	.word	6b, 0f;				\
-	.word	7b, 0f;				\
-	.word	8b, 0f;				\
-	.word	9b, 0f;				\
-	.word	10b, 0f;			\
-	.word	11b, 0f;			\
-	.word	12b, 0f;			\
-	.word	13b, 0f;			\
-	.word	14b, 0f;			\
-	.word	15b, 0f;			\
-	.word	16b, 0f;			\
+	.word	1b, 33f;			\
+	.word	2b, 33f;			\
+	.word	3b, 33f;			\
+	.word	4b, 33f;			\
+	.word	5b, 33f;			\
+	.word	6b, 33f;			\
+	.word	7b, 33f;			\
+	.word	8b, 33f;			\
+	.word	9b, 33f;			\
+	.word	10b, 33f;			\
+	.word	11b, 33f;			\
+	.word	12b, 33f;			\
+	.word	13b, 33f;			\
+	.word	14b, 33f;			\
+	.word	15b, 33f;			\
+	.word	16b, 33f;			\
 	.text
 
 #define COPY_80(offset)	\
@@ -190,14 +190,17 @@
 
 .align 4 /* Alignment is important to keep icache happy */
 page:	/* Create room on stack and save registers for storing values */
-	addik   r1, r1, -32
-	swi	r19, r1, 4
-	swi	r20, r1, 8
-	swi	r21, r1, 12
-	swi	r22, r1, 16
-	swi	r23, r1, 20
-	swi	r24, r1, 24
-	swi	r25, r1, 28
+	addik   r1, r1, -40
+	swi	r5, r1, 0
+	swi	r6, r1, 4
+	swi	r7, r1, 8
+	swi	r19, r1, 12
+	swi	r20, r1, 16
+	swi	r21, r1, 20
+	swi	r22, r1, 24
+	swi	r23, r1, 28
+	swi	r24, r1, 32
+	swi	r25, r1, 36
 loop:	/* r4, r19, r20, r21, r22, r23, r24, r25 are used for storing values */
 	/* Loop unrolling to get performance boost */
 	COPY_80(0x000);
@@ -205,21 +208,44 @@
 	COPY_80(0x100);
 	COPY_80(0x180);
 	/* copy loop */
-	addik	r6, r6, 0x200
-	addik	r7, r7, -0x200
-	bneid	r7, loop
-	addik	r5, r5, 0x200
+	addik   r6, r6, 0x200
+	addik   r7, r7, -0x200
+	bneid   r7, loop
+	addik   r5, r5, 0x200
+
 	/* Restore register content */
-	lwi	r19, r1, 4
-	lwi	r20, r1, 8
-	lwi	r21, r1, 12
-	lwi	r22, r1, 16
-	lwi	r23, r1, 20
-	lwi	r24, r1, 24
-	lwi	r25, r1, 28
-	addik   r1, r1, 32
+	lwi	r5, r1, 0
+	lwi	r6, r1, 4
+	lwi	r7, r1, 8
+	lwi	r19, r1, 12
+	lwi	r20, r1, 16
+	lwi	r21, r1, 20
+	lwi	r22, r1, 24
+	lwi	r23, r1, 28
+	lwi	r24, r1, 32
+	lwi	r25, r1, 36
+	addik   r1, r1, 40
 	/* return back */
+	addik	r3, r0, 0
+	rtsd	r15, 8
+	nop
+
+/* Fault case - return temp count */
+33:
 	addik	r3, r7, 0
+	/* Restore register content */
+	lwi	r5, r1, 0
+	lwi	r6, r1, 4
+	lwi	r7, r1, 8
+	lwi	r19, r1, 12
+	lwi	r20, r1, 16
+	lwi	r21, r1, 20
+	lwi	r22, r1, 24
+	lwi	r23, r1, 28
+	lwi	r24, r1, 32
+	lwi	r25, r1, 36
+	addik   r1, r1, 40
+	/* return back */
 	rtsd	r15, 8
 	nop
 
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index b6bb92c..41dd0088 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -157,7 +157,7 @@
 }
 
 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t gfp)
+	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -192,7 +192,7 @@
 }
 
 static void octeon_dma_free_coherent(struct device *dev, size_t size,
-	void *vaddr, dma_addr_t dma_handle)
+	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order = get_order(size);
 
@@ -240,8 +240,8 @@
 
 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
 	.dma_map_ops = {
-		.alloc_coherent = octeon_dma_alloc_coherent,
-		.free_coherent = octeon_dma_free_coherent,
+		.alloc = octeon_dma_alloc_coherent,
+		.free = octeon_dma_free_coherent,
 		.map_page = octeon_dma_map_page,
 		.unmap_page = swiotlb_unmap_page,
 		.map_sg = octeon_dma_map_sg,
@@ -325,8 +325,8 @@
 #ifdef CONFIG_PCI
 static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
 	.dma_map_ops = {
-		.alloc_coherent = octeon_dma_alloc_coherent,
-		.free_coherent = octeon_dma_free_coherent,
+		.alloc = octeon_dma_alloc_coherent,
+		.free = octeon_dma_free_coherent,
 		.map_page = octeon_dma_map_page,
 		.unmap_page = swiotlb_unmap_page,
 		.map_sg = octeon_dma_map_sg,
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 0a430e0..e44a55b 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -60,7 +60,7 @@
 		if (mymtd) {
 			mymtd->owner = THIS_MODULE;
 			mtd_device_parse_register(mymtd, part_probe_types,
-						  0, NULL, 0);
+						  NULL, NULL, 0);
 		} else {
 			pr_err("Failed to register MTD device for flash\n");
 		}
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index c3e2b85..97e7ce9 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -78,7 +78,7 @@
 }
 
 /**
- * Detect available CPUs, populate cpu_possible_map
+ * Detect available CPUs, populate cpu_possible_mask
  */
 static void octeon_smp_hotplug_setup(void)
 {
@@ -268,7 +268,7 @@
 
 	spin_lock(&smp_reserve_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 	local_irq_disable();
 	fixup_irqs();
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 7aa37dd..be39a12 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -57,25 +57,31 @@
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
 	return ret;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ops->free_coherent(dev, size, vaddr, dma_handle);
+	ops->free(dev, size, vaddr, dma_handle, attrs);
 
 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 802e616..33f63ba 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -173,7 +173,7 @@
 	if (retval)
 		goto out_unlock;
 
-	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index e309665..f8b2c59 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -25,7 +25,7 @@
 	int i;
 
 #ifdef CONFIG_SMP
-	if (!cpu_isset(n, cpu_online_map))
+	if (!cpu_online(n))
 		return 0;
 #endif
 
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index ca67356..3046e29 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -317,7 +317,7 @@
 
 	pr_info("SMP: CPU%d is offline\n", cpu);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 
 	local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9c1cce9..ba9376bf 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -148,7 +148,7 @@
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	for (;;) {
 		if (cpu_wait)
 			(*cpu_wait)();		/* Wait if available. */
@@ -174,7 +174,7 @@
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 #endif
 }
 
@@ -248,7 +248,7 @@
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	return 0;
 }
@@ -320,13 +320,12 @@
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_mm(mm);
 
@@ -360,13 +359,12 @@
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
@@ -407,13 +405,12 @@
 
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, vma->vm_mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
 				cpu_context(cpu, vma->vm_mm) = 0;
+		}
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
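
All three TLB-flush paths drop the on-stack copy of cpu_online_map and walk the online mask directly, skipping the current CPU inside the loop instead of clearing it from a temporary mask. A hedged sketch of the loop shape (function name invented; the real callers keep preemption disabled around this, as the hunks above do):

	#include <linux/cpumask.h>
	#include <linux/smp.h>

	static void poke_other_cpus_sketch(void)
	{
		unsigned int cpu;

		/* was: mask = cpu_online_map; cpu_clear(self, mask);
		 *      for_each_cpu_mask(cpu, mask) ... */
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;	/* skip ourselves instead of copying the mask */
			/* per-remote-CPU work goes here */
		}
	}
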
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index c4f75bb..f5dd38f 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -291,7 +291,7 @@
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 1f9ca07..47037ec 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -80,9 +80,9 @@
 	if (vma)
 		mask = *mm_cpumask(vma->vm_mm);
 	else
-		mask = cpu_online_map;
-	cpu_clear(cpu, mask);
-	for_each_cpu_mask(cpu, mask)
+		mask = *cpu_online_mask;
+	cpumask_clear_cpu(cpu, &mask);
+	for_each_cpu(cpu, &mask)
 		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
 
 	preempt_enable();
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 4608491..3fab204 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -98,7 +98,7 @@
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp)
+	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -132,7 +132,7 @@
 EXPORT_SYMBOL(dma_free_noncoherent);
 
 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
@@ -323,8 +323,8 @@
 EXPORT_SYMBOL(dma_cache_sync);
 
 static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc_coherent = mips_dma_alloc_coherent,
-	.free_coherent = mips_dma_free_coherent,
+	.alloc = mips_dma_alloc_coherent,
+	.free = mips_dma_free_coherent,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index db17f49..fab316d 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -165,7 +165,7 @@
 	cpu_set(boot_cpu, phys_cpu_present_map);
 	__cpu_number_map[boot_cpu] = 0;
 	__cpu_logical_map[0] = boot_cpu;
-	cpu_set(0, cpu_possible_map);
+	set_cpu_possible(0, true);
 
 	num_cpus = 1;
 	for (i = 0; i < NR_CPUS; i++) {
@@ -177,14 +177,14 @@
 			cpu_set(i, phys_cpu_present_map);
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
-			cpu_set(num_cpus, cpu_possible_map);
+			set_cpu_possible(num_cpus, true);
 			++num_cpus;
 		}
 	}
 
 	pr_info("Phys CPU present map: %lx, possible map %lx\n",
 		(unsigned long)phys_cpu_present_map.bits[0],
-		(unsigned long)cpu_possible_map.bits[0]);
+		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 
 	pr_info("Detected %i Slave CPU(s)\n", num_cpus);
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 2608752..b71fae2 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -146,7 +146,7 @@
 }
 
 /*
- * Detect available CPUs, populate cpu_possible_map before smp_init
+ * Detect available CPUs, populate cpu_possible_mask before smp_init
  *
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
@@ -155,10 +155,10 @@
 {
 	int i;
 
-	cpus_clear(cpu_possible_map);
+	init_cpu_possible(cpu_none_mask);
 
 	for (i = 0; i < 2; i++) {
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 		__cpu_number_map[i]	= i;
 		__cpu_logical_map[i]	= i;
 	}
@@ -169,7 +169,7 @@
 	/*
 	 * Be paranoid.  Enable the IPI only if we're really about to go SMP.
 	 */
-	if (cpus_weight(cpu_possible_map))
+	if (num_possible_cpus())
 		set_c0_status(STATUSF_IP5);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index c6851df..735b43b 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@
 			/* Only let it join in if it's marked enabled */
 			if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
 			    (tot_cpus_found != NR_CPUS)) {
-				cpu_set(cpuid, cpu_possible_map);
+				set_cpu_possible(cpuid, true);
 				alloc_cpupda(cpuid, tot_cpus_found);
 				cpus_found++;
 				tot_cpus_found++;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index d667875..de88e22 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -138,7 +138,7 @@
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -147,14 +147,13 @@
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 38e7f6b..285cfef 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -126,7 +126,7 @@
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -135,14 +135,13 @@
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 3ae5607..6c6defc 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -6,6 +6,7 @@
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/types.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -48,112 +49,6 @@
 #  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
 #endif
 
-/* This should get optimized out since it's never called.
-** Or get a link error if xchg is used "wrong".
-*/
-extern void __xchg_called_with_bad_pointer(void);
-
-
-/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __xchg8(char, char *);
-extern unsigned long __xchg32(int, int *);
-#ifdef CONFIG_64BIT
-extern unsigned long __xchg64(unsigned long, unsigned long *);
-#endif
-
-/* optimizer better get rid of switch since size is a constant */
-static __inline__ unsigned long
-__xchg(unsigned long x, __volatile__ void * ptr, int size)
-{
-	switch(size) {
-#ifdef CONFIG_64BIT
-	case 8: return __xchg64(x,(unsigned long *) ptr);
-#endif
-	case 4: return __xchg32((int) x, (int *) ptr);
-	case 1: return __xchg8((char) x, (char *) ptr);
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-
-/*
-** REVISIT - Abandoned use of LDCW in xchg() for now:
-** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
-** o and while we are at it, could CONFIG_64BIT code use LDCD too?
-**
-**	if (__builtin_constant_p(x) && (x == NULL))
-**		if (((unsigned long)p & 0xf) == 0)
-**			return __ldcw(p);
-*/
-#define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-
-#define __HAVE_ARCH_CMPXCHG	1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
-	switch(size) {
-#ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr,o,n)						 \
-  ({									 \
-     __typeof__(*(ptr)) _o_ = (o);					 \
-     __typeof__(*(ptr)) _n_ = (n);					 \
-     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
-				    (unsigned long)_n_, sizeof(*(ptr))); \
-  })
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-				      unsigned long old,
-				      unsigned long new_, int size)
-{
-	switch (size) {
-#ifdef CONFIG_64BIT
-	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-	case 4:	return __cmpxchg_u32(ptr, old, new_);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new_, size);
-	}
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)				  	\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-			(unsigned long)(n), sizeof(*(ptr))))
-#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)					\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
-  })
-#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
 /*
  * Note that we need not lock read accesses - aligned word writes/reads
  * are atomic, so a reader never sees inconsistent values.
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
new file mode 100644
index 0000000..dbd1335
--- /dev/null
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -0,0 +1,116 @@
+/*
+ * forked from parisc asm/atomic.h which was:
+ *	Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ *	Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#ifndef _ASM_PARISC_CMPXCHG_H_
+#define _ASM_PARISC_CMPXCHG_H_
+
+/* This should get optimized out since it's never called.
+** Or get a link error if xchg is used "wrong".
+*/
+extern void __xchg_called_with_bad_pointer(void);
+
+/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __xchg8(char, char *);
+extern unsigned long __xchg32(int, int *);
+#ifdef CONFIG_64BIT
+extern unsigned long __xchg64(unsigned long, unsigned long *);
+#endif
+
+/* optimizer better get rid of switch since size is a constant */
+static inline unsigned long
+__xchg(unsigned long x, __volatile__ void *ptr, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __xchg64(x, (unsigned long *) ptr);
+#endif
+	case 4: return __xchg32((int) x, (int *) ptr);
+	case 1: return __xchg8((char) x, (char *) ptr);
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+/*
+** REVISIT - Abandoned use of LDCW in xchg() for now:
+** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
+** o and while we are at it, could CONFIG_64BIT code use LDCD too?
+**
+**	if (__builtin_constant_p(x) && (x == NULL))
+**		if (((unsigned long)p & 0xf) == 0)
+**			return __ldcw(p);
+*/
+#define xchg(ptr, x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+#define __HAVE_ARCH_CMPXCHG	1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
+				   unsigned int new_);
+extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
+				   unsigned long old, unsigned long new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+	case 4: return __cmpxchg_u32((unsigned int *)ptr,
+				     (unsigned int)old, (unsigned int)new_);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						 \
+({									 \
+	__typeof__(*(ptr)) _o_ = (o);					 \
+	__typeof__(*(ptr)) _n_ = (n);					 \
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+				      unsigned long old,
+				      unsigned long new_, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+	case 4:	return __cmpxchg_u32(ptr, old, new_);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new_, size);
+	}
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+			(unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_local((ptr), (o), (n));					\
+})
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#endif /* _ASM_PARISC_CMPXCHG_H_ */
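The new header keeps the existing size-dispatch scheme: xchg() and cmpxchg() switch on sizeof(*ptr) and deliberately fail to link for unsupported widths. A minimal usage sketch against the macros above (names invented, not from the patch):

/* Assumes <asm/cmpxchg.h> is pulled in, e.g. via <asm/atomic.h>. */
static int example_flag;

/* Atomically claim the flag: true if we changed it from 0 to 1. */
static int example_try_claim(void)
{
	return cmpxchg(&example_flag, 0, 1) == 0;
}

/* Unconditionally store 0 and return the previous value. */
static int example_release(void)
{
	return xchg(&example_flag, 0);
}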
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 2388bdb..49df148 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -8,6 +8,29 @@
 #include <asm/atomic.h>
 #include <asm/errno.h>
 
+/* The following has to match the LWS code in syscall.S.  We have
+   sixteen four-word locks. */
+
+static inline void
+_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	local_irq_save(*flags);
+	arch_spin_lock(s);
+}
+
+static inline void
+_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	arch_spin_unlock(s);
+	local_irq_restore(*flags);
+}
+
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
@@ -26,7 +49,7 @@
 
 	pagefault_disable();
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -71,7 +94,7 @@
 		ret = -ENOSYS;
 	}
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	pagefault_enable();
 
@@ -113,7 +136,7 @@
 	 * address. This should scale to a couple of CPUs.
 	 */
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	ret = get_user(val, uaddr);
 
@@ -122,7 +145,7 @@
 
 	*uval = val;
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	return ret;
 }
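The two helpers above pick a lock by hashing the futex's user address into the lws_lock_start[] array shared with the LWS code in syscall.S: bits 4-7 of the address select one of the sixteen locks, and since each lock occupies four words the word index is simply (addr & 0xf0) >> 2. A standalone sketch of that index computation (illustration only, not part of the patch):

static unsigned long example_lws_lock_index(unsigned long uaddr)
{
	/* lock number * 4, i.e. an index into the u32 lws_lock_start[] array */
	return (uaddr & 0xf0) >> 2;
}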
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 5006e8e..0bb1d63 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -290,8 +290,7 @@
 	mb();
 
 	/* Well, support 2.4 linux scheme as well. */
-	if (cpu_isset(cpunum, cpu_online_map))
-	{
+	if (cpu_online(cpunum))	{
 		extern void machine_halt(void); /* arch/parisc.../process.c */
 
 		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
diff --git a/arch/powerpc/boot/dts/p1020mbg-pc.dtsi b/arch/powerpc/boot/dts/p1020mbg-pc.dtsi
new file mode 100644
index 0000000..a24699c
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020mbg-pc.dtsi
@@ -0,0 +1,150 @@
+/*
+ * P1020 MBG-PC Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	nor@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x4000000>;
+		bank-width = <2>;
+		device-width = <1>;
+
+		partition@0 {
+			/* 128KB for DTB Image */
+			reg = <0x0 0x00020000>;
+			label = "NOR DTB Image";
+		};
+
+		partition@20000 {
+			/* 3.875 MB for Linux Kernel Image */
+			reg = <0x00020000 0x003e0000>;
+			label = "NOR Linux Kernel Image";
+		};
+
+		partition@400000 {
+			/* 58MB for Root file System */
+			reg = <0x00400000 0x03a00000>;
+			label = "NOR Root File System";
+		};
+
+		partition@3e00000 {
+			/* This location must not be altered  */
+			/* 1M for Vitesse 7385 Switch firmware */
+			reg = <0x3e00000 0x00100000>;
+			label = "NOR Vitesse-7385 Firmware";
+			read-only;
+		};
+
+		partition@3f00000 {
+			/* This location must not be altered  */
+			/* 512KB for u-boot Bootloader Image */
+			/* 512KB for u-boot Environment Variables */
+			reg = <0x03f00000 0x00100000>;
+			label = "NOR U-Boot Image";
+			read-only;
+		};
+	};
+
+	L2switch@2,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "vitesse-7385";
+		reg = <0x2 0x0 0x20000>;
+	};
+};
+
+&soc {
+	i2c@3000 {
+		rtc@68 {
+			compatible = "dallas,ds1339";
+			reg = <0x68>;
+		};
+	};
+
+	mdio@24000 {
+		phy0: ethernet-phy@0 {
+			interrupts = <3 1 0 0>;
+			reg = <0x0>;
+		};
+		phy1: ethernet-phy@1 {
+			interrupts = <2 1 0 0>;
+			reg = <0x1>;
+		};
+	};
+
+	mdio@25000 {
+		tbi1: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	mdio@26000 {
+		tbi2: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet0: ethernet@b0000 {
+		fixed-link = <1 1 1000 0 0>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	enet1: ethernet@b1000 {
+		phy-handle = <&phy0>;
+		tbi-handle = <&tbi1>;
+		phy-connection-type = "sgmii";
+	};
+
+	enet2: ethernet@b2000 {
+		phy-handle = <&phy1>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	usb@22000 {
+		phy_type = "ulpi";
+	};
+
+	/* USB2 is shared with localbus, so it must be disabled
+	   by default. We can't put 'status = "disabled";' here
+	   since U-Boot doesn't clear the status property when
+	   it enables USB2. OTOH, U-Boot does create a new node
+	   when there isn't any. So, just comment it out.
+	usb@23000 {
+		phy_type = "ulpi";
+	};
+	*/
+};
diff --git a/arch/powerpc/boot/dts/p1020mbg-pc_32b.dts b/arch/powerpc/boot/dts/p1020mbg-pc_32b.dts
new file mode 100644
index 0000000..ab8f076
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020mbg-pc_32b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 MBG-PC Device Tree Source (32-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+	model = "fsl,P1020MBG-PC";
+	compatible = "fsl,P1020MBG-PC";
+
+	memory {
+		device_type = "memory";
+	};
+
+	lbc: localbus@ffe05000 {
+		reg = <0x0 0xffe05000 0x0 0x1000>;
+
+		/* NOR and L2 switch */
+		ranges = <0x0 0x0 0x0 0xec000000 0x04000000
+			  0x1 0x0 0x0 0xffa00000 0x00040000
+			  0x2 0x0 0x0 0xffb00000 0x00020000>;
+	};
+
+	soc: soc@ffe00000 {
+		ranges = <0x0 0x0 0xffe00000 0x100000>;
+	};
+
+	pci0: pcie@ffe09000 {
+		reg = <0x0 0xffe09000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0x0 0xa0000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0x0 0xffc10000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+
+	pci1: pcie@ffe0a000 {
+		reg = <0x0 0xffe0a000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0x0 0x80000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0x0 0xffc00000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+};
+
+/include/ "p1020mbg-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020mbg-pc_36b.dts b/arch/powerpc/boot/dts/p1020mbg-pc_36b.dts
new file mode 100644
index 0000000..9e9f401
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020mbg-pc_36b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 MBG-PC Device Tree Source (36-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+	model = "fsl,P1020MBG-PC";
+	compatible = "fsl,P1020MBG-PC";
+
+	memory {
+		device_type = "memory";
+	};
+
+	lbc: localbus@fffe05000 {
+		reg = <0xf 0xffe05000 0x0 0x1000>;
+
+		/* NOR and L2 switch */
+		ranges = <0x0 0x0 0xf 0xec000000 0x04000000
+			  0x1 0x0 0xf 0xffa00000 0x00040000
+			  0x2 0x0 0xf 0xffb00000 0x00020000>;
+	};
+
+	soc: soc@fffe00000 {
+		ranges = <0x0 0xf 0xffe00000 0x100000>;
+	};
+
+	pci0: pcie@fffe09000 {
+		reg = <0xf 0xffe09000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+
+	pci1: pcie@fffe0a000 {
+		reg = <0xf 0xffe0a000 0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+};
+
+/include/ "p1020mbg-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020utm-pc.dtsi b/arch/powerpc/boot/dts/p1020utm-pc.dtsi
new file mode 100644
index 0000000..7ea85ea
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020utm-pc.dtsi
@@ -0,0 +1,139 @@
+/*
+ * P1020 UTM-PC Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	nor@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x2000000>;
+		bank-width = <2>;
+		device-width = <1>;
+
+		partition@0 {
+			/* 256KB for DTB Image */
+			reg = <0x0 0x00040000>;
+			label = "NOR DTB Image";
+		};
+
+		partition@40000 {
+			/* 3.75 MB for Linux Kernel Image */
+			reg = <0x00040000 0x003c0000>;
+			label = "NOR Linux Kernel Image";
+		};
+
+		partition@400000 {
+			/* 27MB for Root file System */
+			reg = <0x00400000 0x01b00000>;
+			label = "NOR Root File System";
+		};
+
+		partition@1f00000 {
+			/* This location must not be altered  */
+			/* 512KB for u-boot Bootloader Image */
+			/* 512KB for u-boot Environment Variables */
+			reg = <0x01f00000 0x00100000>;
+			label = "NOR U-Boot Image";
+			read-only;
+		};
+	};
+};
+
+&soc {
+	i2c@3000 {
+		rtc@68 {
+			compatible = "dallas,ds1339";
+			reg = <0x68>;
+		};
+	};
+
+	mdio@24000 {
+		phy0: ethernet-phy@0 {
+			interrupts = <3 1 0 0>;
+			reg = <0x0>;
+		};
+		phy1: ethernet-phy@1 {
+			interrupts = <2 1 0 0>;
+			reg = <0x1>;
+		};
+		phy2: ethernet-phy@2 {
+			interrupts = <1 1 0 0>;
+			reg = <0x2>;
+		};
+	};
+
+	mdio@25000 {
+		tbi1: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	mdio@26000 {
+		tbi2: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet0: ethernet@b0000 {
+		phy-handle = <&phy2>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	enet1: ethernet@b1000 {
+		phy-handle = <&phy0>;
+		tbi-handle = <&tbi1>;
+		phy-connection-type = "sgmii";
+	};
+
+	enet2: ethernet@b2000 {
+		phy-handle = <&phy1>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	usb@22000 {
+		phy_type = "ulpi";
+	};
+
+	/* USB2 is shared with localbus, so it must be disabled
+	   by default. We can't put 'status = "disabled";' here
+	   since U-Boot doesn't clear the status property when
+	   it enables USB2. OTOH, U-Boot does create a new node
+	   when there isn't any. So, just comment it out.
+	usb@23000 {
+		phy_type = "ulpi";
+	};
+	*/
+};
diff --git a/arch/powerpc/boot/dts/p1020utm-pc_32b.dts b/arch/powerpc/boot/dts/p1020utm-pc_32b.dts
new file mode 100644
index 0000000..4bfdd89
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020utm-pc_32b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 UTM-PC Device Tree Source (32-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+	model = "fsl,P1020UTM-PC";
+	compatible = "fsl,P1020UTM-PC";
+
+	memory {
+		device_type = "memory";
+	};
+
+	lbc: localbus@ffe05000 {
+		reg = <0x0 0xffe05000 0x0 0x1000>;
+
+		/* NOR */
+		ranges = <0x0 0x0 0x0 0xec000000 0x02000000
+			  0x1 0x0 0x0 0xffa00000 0x00040000
+			  0x2 0x0 0x0 0xffb00000 0x00020000>;
+	};
+
+	soc: soc@ffe00000 {
+		ranges = <0x0 0x0 0xffe00000 0x100000>;
+	};
+
+	pci0: pcie@ffe09000 {
+		reg = <0x0 0xffe09000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0x0 0xa0000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0x0 0xffc10000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+
+	pci1: pcie@ffe0a000 {
+		reg = <0x0 0xffe0a000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0x0 0x80000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0x0 0xffc00000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+};
+
+/include/ "p1020utm-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020utm-pc_36b.dts b/arch/powerpc/boot/dts/p1020utm-pc_36b.dts
new file mode 100644
index 0000000..abec535
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020utm-pc_36b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1020 UTM-PC Device Tree Source (36-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+	model = "fsl,P1020UTM-PC";
+	compatible = "fsl,P1020UTM-PC";
+
+	memory {
+		device_type = "memory";
+	};
+
+	lbc: localbus@fffe05000 {
+		reg = <0xf 0xffe05000 0x0 0x1000>;
+
+		/* NOR */
+		ranges = <0x0 0x0 0xf 0xec000000 0x02000000
+			  0x1 0x0 0xf 0xffa00000 0x00040000
+			  0x2 0x0 0xf 0xffb00000 0x00020000>;
+	};
+
+	soc: soc@fffe00000 {
+		ranges = <0x0 0xf 0xffe00000 0x100000>;
+	};
+
+	pci0: pcie@fffe09000 {
+		reg = <0xf 0xffe09000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+
+	pci1: pcie@fffe0a000 {
+		reg = <0xf 0xffe0a000 0 0x1000>;
+		ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xe0000000
+				  0x2000000 0x0 0xe0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+};
+
+/include/ "p1020utm-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index 4f957db..2852139 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -135,7 +135,6 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
-		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -151,7 +150,6 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
-		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -167,7 +165,6 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
-		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index f469145..22a215e 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -173,7 +173,6 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
-		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -189,7 +188,6 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
-		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -205,7 +203,6 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
-		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -221,7 +218,6 @@
 		reg = <0xf 0xfe203000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
-		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p3060qds.dts b/arch/powerpc/boot/dts/p3060qds.dts
index 529042e..9ae875c 100644
--- a/arch/powerpc/boot/dts/p3060qds.dts
+++ b/arch/powerpc/boot/dts/p3060qds.dts
@@ -212,7 +212,6 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
-		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -228,7 +227,6 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
-		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 6d60e54..3e20460 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -141,7 +141,6 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
-		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -157,7 +156,6 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
-		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -173,7 +171,6 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
-		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index 1c25068..27c07ed 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -173,7 +173,6 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
-		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -189,7 +188,6 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
-		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -205,7 +203,6 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
-		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -221,7 +218,6 @@
 		reg = <0xf 0xfe203000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
-		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index f8aef20..91db656 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -116,6 +116,7 @@
 CONFIG_HW_RANDOM=y
 CONFIG_NVRAM=y
 CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MPC=y
 CONFIG_SPI=y
 CONFIG_SPI_GPIO=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 82b13bf..6798343 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -71,6 +71,8 @@
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
 CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MPC=y
 # CONFIG_HWMON is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=y
 # CONFIG_HID_SUPPORT is not set
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index cc87a84..d6b6df5 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -117,6 +117,7 @@
 CONFIG_SERIAL_QE=m
 CONFIG_NVRAM=y
 CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_CPM=m
 CONFIG_I2C_MPC=y
 CONFIG_SPI=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 48d6682..5b0e292 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -119,6 +119,7 @@
 CONFIG_SERIAL_QE=m
 CONFIG_NVRAM=y
 CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_CPM=m
 CONFIG_I2C_MPC=y
 CONFIG_SPI=y
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index dd70fac..62678e3 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -22,9 +22,11 @@
 
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag);
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs);
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle);
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs);
 
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -130,23 +132,29 @@
 
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 	void *cpu_addr;
 
 	BUG_ON(!dma_ops);
 
-	cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 
 	return cpu_addr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
@@ -154,7 +162,7 @@
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
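With this change dma_alloc_coherent()/dma_free_coherent() become thin wrappers that pass a NULL struct dma_attrs pointer to dma_alloc_attrs()/dma_free_attrs(), which in turn call the renamed ->alloc/->free ops. A hypothetical caller (device and size are made up) expands as follows:

#include <linux/dma-mapping.h>

static int example_dma_buffer(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL) now means: */
	buf = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL, NULL);
	if (!buf)
		return -ENOMEM;

	/* was dma_free_coherent(dev, PAGE_SIZE, buf, handle) */
	dma_free_attrs(dev, PAGE_SIZE, buf, handle, NULL);
	return 0;
}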
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index f3b0c2c..976835d 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -134,10 +134,15 @@
  * whether they will be clobbered.
  *
  * Note that r11 can be used as an output parameter.
+ *
+ * The "memory" clobber is only necessary for hcalls where the Hypervisor
+ * will read or write guest memory. However, we add it to all hcalls because
+ * the impact is minimal, and we want to ensure that it's present for the
+ * hcalls that need it.
 */
 
 /* List of common clobbered registers.  Do not use this macro. */
-#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc"
+#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
 
 #define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
 #define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
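The point of the added "memory" clobber is that a hypercall which makes the hypervisor read or write a guest buffer is, from the compiler's point of view, an opaque memory access; without the clobber, cached values of that buffer could legally stay in registers across the call. A rough, hand-written illustration (this is not the kernel's actual wrapper; the token and buffer handling are invented for the sketch):

static inline unsigned long example_ev_hcall(unsigned long token, void *buf)
{
	register unsigned long r11 __asm__("r11") = token;
	register unsigned long r3  __asm__("r3")  = (unsigned long)buf;

	/* "sc 1" is the ePAPR embedded-hypervisor system call */
	__asm__ __volatile__("sc 1"
		: "+r" (r11), "+r" (r3)
		:
		: "r0", "r12", "xer", "ctr", "lr", "cc", "memory");

	return r3;
}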
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index ce04530..aa4c488 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -16,15 +16,6 @@
 #define __ASM_POWERPC_FSL_GUTS_H__
 #ifdef __KERNEL__
 
-/*
- * These #ifdefs are safe because it's not possible to build a kernel that
- * runs on e500 and e600 cores.
- */
-
-#if !defined(CONFIG_PPC_85xx) && !defined(CONFIG_PPC_86xx)
-#error Only 85xx and 86xx SOCs are supported
-#endif
-
 /**
  * Global Utility Registers.
  *
@@ -36,11 +27,7 @@
  * different names.  In these cases, one name is chosen to avoid extraneous
  * #ifdefs.
  */
-#ifdef CONFIG_PPC_85xx
-struct ccsr_guts_85xx {
-#else
-struct ccsr_guts_86xx {
-#endif
+struct ccsr_guts {
 	__be32	porpllsr;	/* 0x.0000 - POR PLL Ratio Status Register */
 	__be32	porbmsr;	/* 0x.0004 - POR Boot Mode Status Register */
 	__be32	porimpscr;	/* 0x.0008 - POR I/O Impedance Status and Control Register */
@@ -77,11 +64,8 @@
 	u8	res0a8[0xb0 - 0xa8];
 	__be32	rstcr;		/* 0x.00b0 - Reset Control Register */
 	u8	res0b4[0xc0 - 0xb4];
-#ifdef CONFIG_PPC_85xx
-	__be32  iovselsr;	/* 0x.00c0 - I/O voltage select status register */
-#else
-	__be32	elbcvselcr;	/* 0x.00c0 - eLBC Voltage Select Ctrl Reg */
-#endif
+	__be32  iovselsr;	/* 0x.00c0 - I/O voltage select status register
+					     Called 'elbcvselcr' on 86xx SOCs */
 	u8	res0c4[0x224 - 0xc4];
 	__be32  iodelay1;	/* 0x.0224 - IO delay control register 1 */
 	__be32  iodelay2;	/* 0x.0228 - IO delay control register 2 */
@@ -136,7 +120,7 @@
  * ch: The channel on the DMA controller (0, 1, 2, or 3)
  * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
  */
-static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts,
+static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
 	unsigned int co, unsigned int ch, unsigned int device)
 {
 	unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
@@ -172,7 +156,7 @@
  * ch: The channel on the DMA controller (0, 1, 2, or 3)
  * value: the new value for the bit (0 or 1)
  */
-static inline void guts_set_pmuxcr_dma(struct ccsr_guts_86xx __iomem *guts,
+static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
 	unsigned int co, unsigned int ch, unsigned int value)
 {
 	if ((ch == 0) || (ch == 3)) {
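Since one kernel image never runs on both e500 (85xx) and e600 (86xx) parts, the two #ifdef'd structures collapse into a single struct ccsr_guts, and helpers such as guts_set_dmacr() now take that one type. A hypothetical caller sketch (the physical address parameter and the exact DMACR device macro are assumptions, not taken from this patch):

#include <linux/io.h>
#include <asm/fsl_guts.h>

static void example_route_dma(phys_addr_t guts_phys)
{
	struct ccsr_guts __iomem *guts;

	guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
	if (!guts)
		return;

	/* route DMA controller 1, channel 0 to the SSI device */
	guts_set_dmacr(guts, 1, 0, CCSR_GUTS_DMACR_DEV_SSI);
	iounmap(guts);
}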
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index cf417e51..e648af9 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -33,8 +33,6 @@
 /* Same thing, used by the generic IRQ code */
 #define NR_IRQS_LEGACY		NUM_ISA_INTERRUPTS
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
 extern irq_hw_number_t virq_to_hw(unsigned int virq);
 
 /**
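The extern declaration here (and, further down, the out-of-line EXPORT_SYMBOL_GPL copy in irq.c) can go because generic code now supplies an equivalent accessor; it is essentially the trivial inline sketched below (see include/linux/irq.h for the authoritative version):

static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}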
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 3f6464b..bcfdcd2 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -17,7 +17,8 @@
  * to the dma address (mapping) of the first page.
  */
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag)
+				      dma_addr_t *dma_handle, gfp_t flag,
+				      struct dma_attrs *attrs)
 {
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, dev->coherent_dma_mask, flag,
@@ -25,7 +26,8 @@
 }
 
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle)
+				    void *vaddr, dma_addr_t dma_handle,
+				    struct dma_attrs *attrs)
 {
 	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
@@ -105,8 +107,8 @@
 }
 
 struct dma_map_ops dma_iommu_ops = {
-	.alloc_coherent		= dma_iommu_alloc_coherent,
-	.free_coherent		= dma_iommu_free_coherent,
+	.alloc			= dma_iommu_alloc_coherent,
+	.free			= dma_iommu_free_coherent,
 	.map_sg			= dma_iommu_map_sg,
 	.unmap_sg		= dma_iommu_unmap_sg,
 	.dma_supported		= dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 1ebc918..4ab88da 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -47,8 +47,8 @@
  * for everything else.
  */
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc_coherent = dma_direct_alloc_coherent,
-	.free_coherent = dma_direct_free_coherent,
+	.alloc = dma_direct_alloc_coherent,
+	.free = dma_direct_free_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.dma_supported = swiotlb_dma_supported,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 7d0233c..b1ec983 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -26,7 +26,8 @@
 
 
 void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag)
+				dma_addr_t *dma_handle, gfp_t flag,
+				struct dma_attrs *attrs)
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -54,7 +55,8 @@
 }
 
 void dma_direct_free_coherent(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle)
+			      void *vaddr, dma_addr_t dma_handle,
+			      struct dma_attrs *attrs)
 {
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	__dma_free_coherent(size, vaddr);
@@ -150,8 +152,8 @@
 #endif
 
 struct dma_map_ops dma_direct_ops = {
-	.alloc_coherent			= dma_direct_alloc_coherent,
-	.free_coherent			= dma_direct_free_coherent,
+	.alloc				= dma_direct_alloc_coherent,
+	.free				= dma_direct_free_coherent,
 	.map_sg				= dma_direct_map_sg,
 	.unmap_sg			= dma_direct_unmap_sg,
 	.dma_supported			= dma_direct_dma_supported,
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3e57a00..ba3aeb4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -206,40 +206,43 @@
 	andi.	r10,r10,MSR_EE		/* Did EE change? */
 	beq	1f
 
-	/* Save handler and return address into the 2 unused words
-	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
-	 * else can be recovered from the pt_regs except r3 which for
-	 * normal interrupts has been set to pt_regs and for syscalls
-	 * is an argument, so we temporarily use ORIG_GPR3 to save it
-	 */
-	stw	r9,8(r1)
-	stw	r11,12(r1)
-	stw	r3,ORIG_GPR3(r1)
 	/*
 	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 	 * If from user mode there is only one stack frame on the stack, and
 	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
 	 * stack frame to make trace_hardirqs_off happy.
+	 *
+	 * This is handy because we also need to save a bunch of GPRs,
+	 * r3 can be different from GPR3(r1) at this point, r9 and r11
+	 * contains the old MSR and handler address respectively,
+	 * r4 & r5 can contain page fault arguments that need to be passed
+	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+	 * they aren't useful past this point (aren't syscall arguments),
+	 * the rest is restored from the exception frame.
 	 */
+	stwu	r1,-32(r1)
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,16(r1)
+	stw	r4,20(r1)
+	stw	r5,24(r1)
 	andi.	r12,r12,MSR_PR
-	beq	11f
-	stwu	r1,-16(r1)
+	b	11f
 	bl	trace_hardirqs_off
-	addi	r1,r1,16
 	b	12f
-
 11:
 	bl	trace_hardirqs_off
 12:
+	lwz	r5,24(r1)
+	lwz	r4,20(r1)
+	lwz	r3,16(r1)
+	lwz	r11,12(r1)
+	lwz	r9,8(r1)
+	addi	r1,r1,32
 	lwz	r0,GPR0(r1)
-	lwz	r3,ORIG_GPR3(r1)
-	lwz	r4,GPR4(r1)
-	lwz	r5,GPR5(r1)
 	lwz	r6,GPR6(r1)
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
-	lwz	r9,8(r1)
-	lwz	r11,12(r1)
 1:	mtctr	r11
 	mtlr	r9
 	bctr				/* jump to handler */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index cfe7a38..18bdf74 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -40,6 +40,8 @@
 #include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/fadump.h>
+#include <asm/debug.h>
+#include <asm/setup.h>
 
 static struct fw_dump fw_dump;
 static struct fadump_mem_struct fdm;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 79bb282..b01d14e 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -65,7 +65,8 @@
 static void *ibmebus_alloc_coherent(struct device *dev,
 				    size_t size,
 				    dma_addr_t *dma_handle,
-				    gfp_t flag)
+				    gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	void *mem;
 
@@ -77,7 +78,8 @@
 
 static void ibmebus_free_coherent(struct device *dev,
 				  size_t size, void *vaddr,
-				  dma_addr_t dma_handle)
+				  dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	kfree(vaddr);
 }
@@ -136,8 +138,8 @@
 }
 
 static struct dma_map_ops ibmebus_dma_ops = {
-	.alloc_coherent     = ibmebus_alloc_coherent,
-	.free_coherent      = ibmebus_free_coherent,
+	.alloc              = ibmebus_alloc_coherent,
+	.free               = ibmebus_free_coherent,
 	.map_sg             = ibmebus_map_sg,
 	.unmap_sg           = ibmebus_unmap_sg,
 	.dma_supported      = ibmebus_dma_supported,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 243dbab..5ec1b23 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -560,12 +560,6 @@
 	local_irq_restore(flags);
 }
 
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 76a6e40..782bd0a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -24,6 +24,7 @@
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/machdep.h>
+#include <asm/debug.h>
 
 /*
  * This table contains the mapping between PowerPC hardware trap types, and
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f88698c..4937c96 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1235,7 +1235,7 @@
 	ctrl |= CTRL_RUNLATCH;
 	mtspr(SPRN_CTRLT, ctrl);
 
-	ti->local_flags |= TLF_RUNLATCH;
+	ti->local_flags |= _TLF_RUNLATCH;
 }
 
 /* Called with hard IRQs off */
@@ -1244,7 +1244,7 @@
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
 
-	ti->local_flags &= ~TLF_RUNLATCH;
+	ti->local_flags &= ~_TLF_RUNLATCH;
 
 	ctrl = mfspr(SPRN_CTRLF);
 	ctrl &= ~CTRL_RUNLATCH;
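The runlatch fix swaps the bit number (TLF_RUNLATCH) for the bit mask (_TLF_RUNLATCH) when updating local_flags; OR-ing the bare bit number would set the wrong bit, and the ~ path would clear the wrong one. The thread_info flags follow the usual kernel convention, sketched here with an illustrative value (see arch/powerpc/include/asm/thread_info.h for the real definitions):

#define TLF_RUNLATCH	4			/* bit position */
#define _TLF_RUNLATCH	(1 << TLF_RUNLATCH)	/* mask actually OR-ed into local_flags */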
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index b2f7c84..a3a9990 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -482,7 +482,8 @@
 }
 
 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
-                                          dma_addr_t *dma_handle, gfp_t flag)
+					  dma_addr_t *dma_handle, gfp_t flag,
+					  struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 	void *ret;
@@ -492,7 +493,7 @@
 		return NULL;
 	}
 
-	ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
+	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
 	if (unlikely(ret == NULL)) {
 		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -502,11 +503,12 @@
 }
 
 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
-                                        void *vaddr, dma_addr_t dma_handle)
+					void *vaddr, dma_addr_t dma_handle,
+					struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 
-	dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
+	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
 
 	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
@@ -607,8 +609,8 @@
 }
 
 struct dma_map_ops vio_dma_mapping_ops = {
-	.alloc_coherent    = vio_dma_iommu_alloc_coherent,
-	.free_coherent     = vio_dma_iommu_free_coherent,
+	.alloc             = vio_dma_iommu_alloc_coherent,
+	.free              = vio_dma_iommu_free_coherent,
 	.map_sg            = vio_dma_iommu_map_sg,
 	.unmap_sg          = vio_dma_iommu_unmap_sg,
 	.map_page          = vio_dma_iommu_map_page,
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index f1950d1..135663a 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -21,6 +21,7 @@
 #include <asm/disassemble.h>
 #include <asm/kvm_book3s.h>
 #include <asm/reg.h>
+#include <asm/switch_to.h>
 
 #define OP_19_XOP_RFID		18
 #define OP_19_XOP_RFI		50
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index bed1279..e1b60f5 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -173,9 +173,9 @@
 
 static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 {
-	struct kvmppc_linear_info *ri;
+	struct kvmppc_linear_info *ri, *ret;
 
-	ri = NULL;
+	ret = NULL;
 	spin_lock(&linear_lock);
 	list_for_each_entry(ri, &free_linears, list) {
 		if (ri->type != type)
@@ -183,11 +183,12 @@
 
 		list_del(&ri->list);
 		atomic_inc(&ri->use_count);
+		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
+		ret = ri;
 		break;
 	}
 	spin_unlock(&linear_lock);
-	memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-	return ri;
+	return ret;
 }
 
 static void kvm_release_linear(struct kvmppc_linear_info *ri)
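The bug fixed above is the classic list_for_each_entry() pitfall: when the list is empty or nothing matches, the cursor does not point at a real entry, so the old code's unconditional memset(ri->base_virt, ...) after the loop could dereference garbage. The fix only touches an entry recorded inside the loop, as in this generic sketch (types and names invented):

#include <linux/list.h>

struct example_item {
	struct list_head list;
	int type;
};

static struct example_item *example_find(struct list_head *head, int type)
{
	struct example_item *pos, *found = NULL;

	list_for_each_entry(pos, head, list) {
		if (pos->type != type)
			continue;
		found = pos;		/* remember the match... */
		break;
	}
	return found;			/* ...and only ever use 'found', which may be NULL */
}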
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 3f7b674..d3fb4df 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -46,8 +46,10 @@
 	/* Save host state to the stack */
 	stdu	r1, -SWITCH_FRAME_SIZE(r1)
 
-	/* Save non-volatile registers (r14 - r31) */
+	/* Save non-volatile registers (r14 - r31) and CR */
 	SAVE_NVGPRS(r1)
+	mfcr	r3
+	std	r3, _CCR(r1)
 
 	/* Save host DSCR */
 BEGIN_FTR_SECTION
@@ -157,8 +159,10 @@
 	 * R13      = PACA
 	 */
 
-	/* Restore non-volatile host registers (r14 - r31) */
+	/* Restore non-volatile host registers (r14 - r31) and CR */
 	REST_NVGPRS(r1)
+	ld	r4, _CCR(r1)
+	mtcr	r4
 
 	addi    r1, r1, SWITCH_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 0a8515a..3e35383 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -84,6 +84,10 @@
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
 
+	/* Save CR */
+	mfcr	r14
+	stw	r14, _CCR(r1)
+
 	/* Save LR */
 	PPC_STL	r0, _LINK(r1)
 
@@ -165,6 +169,9 @@
 	PPC_LL	r4, _LINK(r1)
 	mtlr	r4
 
+	lwz	r14, _CCR(r1)
+	mtcr	r14
+
 	/* Restore non-volatile host registers (r14 - r31) */
 	REST_NVGPRS(r1)
 
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index e70ef2d..a59a25a 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -24,6 +24,7 @@
 #include <asm/kvm_fpu.h>
 #include <asm/reg.h>
 #include <asm/cacheflush.h>
+#include <asm/switch_to.h>
 #include <linux/vmalloc.h>
 
 /* #define DEBUG */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7340e10..7759053 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -33,6 +33,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/mmu_context.h>
+#include <asm/switch_to.h>
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
@@ -776,6 +777,7 @@
 	}
 	}
 
+	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -797,8 +799,6 @@
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
-			preempt_disable();
-
 			/* In case an interrupt came in that was triggered
 			 * from userspace (like DEC), we need to check what
 			 * to inject now! */
@@ -880,7 +880,8 @@
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_to_user((u64 __user *)(long)reg->addr,
+				&to_book3s(vcpu)->hior, sizeof(u64));
 		break;
 	default:
 		break;
@@ -895,7 +896,8 @@
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_from_user(&to_book3s(vcpu)->hior,
+				   (u64 __user *)(long)reg->addr, sizeof(u64));
 		if (!r)
 			to_book3s(vcpu)->hior_explicit = true;
 		break;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 10d8ef6..c8c4b87 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -34,7 +34,8 @@
 /* r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option. */
 #define HOST_R2         12
-#define HOST_NV_GPRS    16
+#define HOST_CR         16
+#define HOST_NV_GPRS    20
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@@ -296,8 +297,10 @@
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
+	lwz	r5, HOST_CR(r1)
 	addi	r1, r1, HOST_STACK_SIZE
 	mtlr	r4
+	mtcr	r5
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
 
@@ -314,6 +317,8 @@
 	stw	r3, HOST_RUN(r1)
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
 	stw	r14, HOST_NV_GPR(r14)(r1)
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index bfb11e0..e2d401a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -93,7 +93,7 @@
 };
 
 /* MPC5200 device tree match tables */
-const struct of_device_id mpc52xx_pci_ids[] __initdata = {
+const struct of_device_id mpc52xx_pci_ids[] __initconst = {
 	{ .type = "pci", .compatible = "fsl,mpc5200-pci", },
 	{ .type = "pci", .compatible = "mpc5200-pci", },
 	{}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 3754ddc..9a6f044 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -270,7 +270,7 @@
 
 	if (machine_is(p1021_mds)) {
 
-		struct ccsr_guts_85xx __iomem *guts;
+		struct ccsr_guts __iomem *guts;
 
 		np = of_find_node_by_name(NULL, "global-utilities");
 		if (np) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 9848f9e..313fce4 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -127,7 +127,7 @@
 #if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
 	if (machine_is(p1025_rdb)) {
 
-		struct ccsr_guts_85xx __iomem *guts;
+		struct ccsr_guts __iomem *guts;
 
 		np = of_find_node_by_name(NULL, "global-utilities");
 		if (np) {
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 0fe88e3..e74b7cd 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -150,7 +150,7 @@
 {
 	struct device_node *guts_node;
 	struct device_node *indirect_node = NULL;
-	struct ccsr_guts_85xx __iomem *guts;
+	struct ccsr_guts __iomem *guts;
 	u8 __iomem *lbc_lcs0_ba = NULL;
 	u8 __iomem *lbc_lcs1_ba = NULL;
 	u8 b;
@@ -269,7 +269,7 @@
 void p1022ds_set_pixel_clock(unsigned int pixclock)
 {
 	struct device_node *guts_np = NULL;
-	struct ccsr_guts_85xx __iomem *guts;
+	struct ccsr_guts __iomem *guts;
 	unsigned long freq;
 	u64 temp;
 	u32 pxclk;
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index bbc6152..62cd3c5 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -225,7 +225,7 @@
 void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
 {
 	struct device_node *guts_np = NULL;
-	struct ccsr_guts_86xx __iomem *guts;
+	struct ccsr_guts __iomem *guts;
 	unsigned long freq;
 	u64 temp;
 	u32 pxclk;
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index db360fc..d09f3e8 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -392,7 +392,7 @@
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index e5c3a2c..f9a48af 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -239,7 +239,7 @@
 	ppc_md.get_irq = beatic_get_irq;
 
 	/* Allocate an irq host */
-	beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
+	beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
 	BUG_ON(beatic_host == NULL);
 	irq_set_default_host(beatic_host);
 }
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index ae9fc7b..b9f509a 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -564,7 +564,8 @@
 /* A coherent allocation implies strong ordering */
 
 static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag)
+				      dma_addr_t *dma_handle, gfp_t flag,
+				      struct dma_attrs *attrs)
 {
 	if (iommu_fixed_is_weak)
 		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
@@ -572,18 +573,19 @@
 					    device_to_mask(dev), flag,
 					    dev_to_node(dev));
 	else
-		return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
-						     flag);
+		return dma_direct_ops.alloc(dev, size, dma_handle, flag,
+					    attrs);
 }
 
 static void dma_fixed_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle)
+				    void *vaddr, dma_addr_t dma_handle,
+				    struct dma_attrs *attrs)
 {
 	if (iommu_fixed_is_weak)
 		iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
 				    dma_handle);
 	else
-		dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
+		dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
 }
 
 static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
@@ -642,8 +644,8 @@
 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
 
 struct dma_map_ops dma_iommu_fixed_ops = {
-	.alloc_coherent = dma_fixed_alloc_coherent,
-	.free_coherent  = dma_fixed_free_coherent,
+	.alloc          = dma_fixed_alloc_coherent,
+	.free           = dma_fixed_free_coherent,
 	.map_sg         = dma_fixed_map_sg,
 	.unmap_sg       = dma_fixed_unmap_sg,
 	.dma_supported  = dma_fixed_dma_supported,
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index 7f9b674..6e3409d 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -61,7 +61,7 @@
 	printk("*** %04x : %s\n", hex, s ? s : "");
 }
 
-static const struct of_device_id qpace_bus_ids[] __initdata = {
+static const struct of_device_id qpace_bus_ids[] __initconst = {
 	{ .type = "soc", },
 	{ .compatible = "soc", },
 	{ .type = "spider", },
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index fa3e294..4ab0876 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -140,7 +140,7 @@
 	return 0;
 }
 
-static const struct of_device_id cell_bus_ids[] __initdata = {
+static const struct of_device_id cell_bus_ids[] __initconst = {
 	{ .type = "soc", },
 	{ .compatible = "soc", },
 	{ .type = "spider", },
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a81e5a8..b4ddaa3 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -192,7 +192,7 @@
 {
 	int rc = -ENOMEM;
 
-	psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
+	psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL);
 
 	if (psurge_host)
 		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 2a4ff86..5f3b232 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -753,9 +753,8 @@
 	unsigned cpu;
 	struct irq_domain *host;
 
-	host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
+	host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
 	irq_set_default_host(host);
-	irq_set_virq_count(PS3_PLUG_MAX + 1);
 
 	for_each_possible_cpu(cpu) {
 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);
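[Editor's note] The irq_domain changes above replace the two-step "add a nomap domain, then irq_set_virq_count()" sequence with a single irq_domain_add_nomap() call whose second argument bounds the number of direct mappings. A minimal sketch of the new calling convention is given below; the ops, callback, and the bound of 64 are illustrative, not from this patch.

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static int example_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hw)
{
	/* Per-interrupt setup would go here. */
	return 0;
}

static const struct irq_domain_ops example_host_ops = {
	.map = example_map,
};

static void __init example_pic_init(void)
{
	struct irq_domain *host;
	unsigned int virq;

	/* 64 is an arbitrary illustrative cap on direct mappings,
	 * playing the role PS3_PLUG_MAX + 1 plays above. */
	host = irq_domain_add_nomap(NULL, 64, &example_host_ops, NULL);
	if (!host)
		return;

	irq_set_default_host(host);
	virq = irq_create_direct_mapping(host);
	(void)virq;
}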
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 880eb9c..5606fe3 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -515,7 +515,8 @@
  * to the dma address (mapping) of the first page.
  */
 static void * ps3_alloc_coherent(struct device *_dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag)
+				 dma_addr_t *dma_handle, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	int result;
 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -552,7 +553,7 @@
 }
 
 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
+			      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 
@@ -701,8 +702,8 @@
 }
 
 static struct dma_map_ops ps3_sb_dma_ops = {
-	.alloc_coherent = ps3_alloc_coherent,
-	.free_coherent = ps3_free_coherent,
+	.alloc = ps3_alloc_coherent,
+	.free = ps3_free_coherent,
 	.map_sg = ps3_sb_map_sg,
 	.unmap_sg = ps3_sb_unmap_sg,
 	.dma_supported = ps3_dma_supported,
@@ -712,8 +713,8 @@
 };
 
 static struct dma_map_ops ps3_ioc0_dma_ops = {
-	.alloc_coherent = ps3_alloc_coherent,
-	.free_coherent = ps3_free_coherent,
+	.alloc = ps3_alloc_coherent,
+	.free = ps3_free_coherent,
 	.map_sg = ps3_ioc0_map_sg,
 	.unmap_sg = ps3_ioc0_unmap_sg,
 	.dma_supported = ps3_dma_supported,
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 4a47525..4cb375c 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -59,8 +59,7 @@
 	struct eeh_event *event;
 	struct eeh_dev *edev;
 
-	daemonize("eehd");
-	set_current_state(TASK_INTERRUPTIBLE);
+	set_task_comm(current, "eehd");
 
 	spin_lock_irqsave(&eeh_eventlist_lock, flags);
 	event = NULL;
@@ -83,6 +82,7 @@
 	printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
 	       eeh_pci_name(edev->pdev));
 
+	set_current_state(TASK_INTERRUPTIBLE);	/* Don't add to load average */
 	edev = handle_eeh_events(event);
 
 	eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index ceb09cb..818e763 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Freescale Semicondutor, Inc. All rights reserved.
  *
  * Authors: 	Shlomi Gridish <gridish@freescale.com>
  * 		Li Yang <leoli@freescale.com>
@@ -266,7 +266,19 @@
 static void qe_snums_init(void)
 {
 	int i;
-	static const u8 snum_init[] = {
+	static const u8 snum_init_76[] = {
+		0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+		0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+		0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+		0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+		0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+		0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+		0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+		0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+		0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+		0xF4, 0xF5, 0xFC, 0xFD,
+	};
+	static const u8 snum_init_46[] = {
 		0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
 		0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
 		0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
@@ -274,9 +286,15 @@
 		0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
 		0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
 	};
+	static const u8 *snum_init;
 
 	qe_num_of_snum = qe_get_num_of_snums();
 
+	if (qe_num_of_snum == 76)
+		snum_init = snum_init_76;
+	else
+		snum_init = snum_init_46;
+
 	for (i = 0; i < qe_num_of_snum; i++) {
 		snums[i].num = snum_init[i];
 		snums[i].state = QE_SNUM_STATE_FREE;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index e49db5d..a3afecd 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,8 @@
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
 
+#include <asm/facility.h>
+
 #define CPU_MF_INT_SF_IAE	(1 << 31)	/* invalid entry address */
 #define CPU_MF_INT_SF_ISE	(1 << 30)	/* incorrect SDBT entry */
 #define CPU_MF_INT_SF_PRA	(1 << 29)	/* program request alert */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 1c7d6ce..6340178 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -1,6 +1,8 @@
 #ifndef __MMU_H
 #define __MMU_H
 
+#include <linux/errno.h>
+
 typedef struct {
 	atomic_t attach_count;
 	unsigned int flush_mm;
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index ac39e7a..87f080b 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <asm/facility.h>
 #include <asm/sysinfo.h>
 #include <asm/ebcdic.h>
 #include <asm/debug.h>
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 8481ecf..4640508 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -18,7 +18,7 @@
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/export.h>
-#include <asm/system.h>
+#include <asm/ctl_reg.h>
 #include <asm/irq.h>
 #include <asm/cpu_mf.h>
 
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 609f985..f58f37f 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -15,7 +15,6 @@
 #include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/export.h>
-#include <asm/system.h>
 #include <asm/irq.h>
 #include <asm/cpu_mf.h>
 #include <asm/lowcore.h>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1581ea2..06264ae 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -50,6 +50,7 @@
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
+#include <asm/facility.h>
 #include <asm/smp.h>
 #include <asm/mmu_context.h>
 #include <asm/cpcmd.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a8bf999..1f77227 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -32,6 +32,8 @@
 #include <linux/slab.h>
 #include <linux/crash_dump.h>
 #include <asm/asm-offsets.h>
+#include <asm/switch_to.h>
+#include <asm/facility.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
 #include <asm/irq.h>
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index c1d5a82..5f2bb42 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -61,6 +61,7 @@
 config DWARF_UNWINDER
 	bool "Enable the DWARF unwinder for stacktraces"
 	select FRAME_POINTER
+	depends on SUPERH32
 	default n
 	help
 	  Enabling this option will make stacktraces more accurate, at
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index d879848..d0d6221 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -28,6 +28,7 @@
 #include <cpu/sh7785.h>
 #include <asm/heartbeat.h>
 #include <asm/clock.h>
+#include <asm/bl_bit.h>
 
 /*
  * NOTE: This board has 2 physical memory maps.
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index adc9b4b..8b50cf7 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -14,6 +14,7 @@
 #include <linux/gfp.h>
 #include <asm/io.h>
 #include <asm/hd64461.h>
+#include <asm/bl_bit.h>
 #include <mach/hp6xx.h>
 #include <cpu/dac.h>
 #include <asm/freq.h>
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index e4ea31a..58592df 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -8,8 +8,6 @@
 # Copyright (C) 1999 Stuart Menefy
 #
 
-MKIMAGE := $(srctree)/scripts/mkuboot.sh
-
 #
 # Assign safe dummy values if these variables are not defined,
 # in order to suppress error message.
@@ -61,10 +59,8 @@
 			$(KERNEL_MEMORY) + \
 			$(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]')
 
-quiet_cmd_uimage = UIMAGE  $@
-      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sh -O linux -T kernel \
-		   -C $(2) -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \
-		   -n 'Linux-$(KERNELRELEASE)' -d $< $@
+UIMAGE_LOADADDR = $(KERNEL_LOAD)
+UIMAGE_ENTRYADDR = $(KERNEL_ENTRY)
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index b1cb271..67ee956 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -54,7 +54,7 @@
 	if (unlikely(ret))
 		return ret;
 
-	return device_create_file(dma_subsys.dev_root, &dev_attr_devices.attr);
+	return device_create_file(dma_subsys.dev_root, &dev_attr_devices);
 }
 postcore_initcall(dma_subsys_init);
 
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 1a73c3e..8bd965e 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -52,25 +52,31 @@
 	return dma_addr == 0;
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
 		return memory;
-	if (!ops->alloc_coherent)
+	if (!ops->alloc)
 		return NULL;
 
-	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -78,14 +84,16 @@
 		return;
 
 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
-	if (ops->free_coherent)
-		ops->free_coherent(dev, size, vaddr, dma_handle);
+	if (ops->free)
+		ops->free(dev, size, vaddr, dma_handle, attrs);
 }
 
 /* arch/sh/mm/consistent.c */
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_addr, gfp_t flag);
+					dma_addr_t *dma_addr, gfp_t flag,
+					struct dma_attrs *attrs);
 extern void dma_generic_free_coherent(struct device *dev, size_t size,
-				      void *vaddr, dma_addr_t dma_handle);
+				      void *vaddr, dma_addr_t dma_handle,
+				      struct dma_attrs *attrs);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
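[Editor's note] From the caller's side, the header change above keeps dma_alloc_coherent()/dma_free_coherent() working unchanged: they become thin wrappers that pass NULL attrs to the new dma_alloc_attrs()/dma_free_attrs(). A short, assumption-laden sketch (the helper name and its use of GFP_KERNEL are illustrative):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Equivalent to dma_alloc_attrs(dev, len, handle, GFP_KERNEL, NULL)
 * after this change; existing drivers need no source modification. */
static void *grab_buffer(struct device *dev, size_t len, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, len, handle, GFP_KERNEL);
}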
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index 7f1b70c..f8f7af5 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -2,6 +2,7 @@
 #include <linux/slab.h>
 #include <asm/processor.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 int init_fpu(struct task_struct *tsk)
 {
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 488d24e..98bbaa4 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <asm/io.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 /* The PR (precision) bit in the FP Status Register must be clear when
  * an frchg instruction is executed, otherwise the instruction is undefined.
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e74cd6c..69ab4d3 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -16,6 +16,7 @@
 #include <cpu/fpu.h>
 #include <asm/processor.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 /* The PR (precision) bit in the FP Status Register must be clear when
  * an frchg instruction is executed, otherwise the instruction is undefined.
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index 5853989..04ab5ae 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -113,7 +113,7 @@
 	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
 
 	/* MSTP32 clocks */
-	CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
+	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]),
 	CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]),
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 6d62eb4..1ddc876 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -29,7 +29,6 @@
 				int index)
 {
 	unsigned long allowed_mode = SUSP_SH_SLEEP;
-	ktime_t before, after;
 	int requested_state = index;
 	int allowed_state;
 	int k;
@@ -47,19 +46,16 @@
 	 */
 	k = min_t(int, allowed_state, requested_state);
 
-	before = ktime_get();
 	sh_mobile_call_standby(cpuidle_mode[k]);
-	after = ktime_get();
-
-	dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
 
 	return k;
 }
 
 static struct cpuidle_device cpuidle_dev;
 static struct cpuidle_driver cpuidle_driver = {
-	.name =		"sh_idle",
-	.owner =	THIS_MODULE,
+	.name			= "sh_idle",
+	.owner			= THIS_MODULE,
+	.en_core_tk_irqen	= 1,
 };
 
 void sh_mobile_setup_cpuidle(void)
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index a6f95ae..08d27fa 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -16,6 +16,7 @@
 #include <asm/suspend.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/bl_bit.h>
 
 /*
  * Notifier lists for pre/post sleep notification
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index 3c55b87..5b0bfcd 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -63,8 +63,8 @@
 #endif
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent		= dma_generic_alloc_coherent,
-	.free_coherent		= dma_generic_free_coherent,
+	.alloc			= dma_generic_alloc_coherent,
+	.free			= dma_generic_free_coherent,
 	.map_page		= nommu_map_page,
 	.map_sg			= nommu_map_sg,
 #ifdef CONFIG_DMA_NONCOHERENT
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 64852ec..ee226e2 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -17,8 +17,8 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/cpuidle.h>
-#include <asm/pgalloc.h>
 #include <linux/atomic.h>
+#include <asm/pgalloc.h>
 #include <asm/smp.h>
 #include <asm/bl_bit.h>
 
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index efb6d39..b117781 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -14,6 +14,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 
 /* Macros for single step instruction identification */
 #define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index f72e3a9..94273aa 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -26,6 +26,7 @@
 #include <asm/mmu_context.h>
 #include <asm/fpu.h>
 #include <asm/syscalls.h>
+#include <asm/switch_to.h>
 
 void show_regs(struct pt_regs * regs)
 {
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index a17a14d..eaebdf6 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -27,6 +27,7 @@
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
index 555a64f..23af175 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -34,6 +34,41 @@
 1:	.short	__NR_rt_sigreturn
 .LEND_rt_sigreturn:
 	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+	.previous
 
 	.section .eh_frame,"a",@progbits
+.LCIE1:
+	.ualong	.LCIE1_end - .LCIE1_start
+.LCIE1_start:
+	.ualong	0		/* CIE ID */
+	.byte	0x1		/* Version number */
+	.string	"zRS"		/* NUL-terminated augmentation string */
+	.uleb128 0x1		/* Code alignment factor */
+	.sleb128 -4		/* Data alignment factor */
+	.byte	0x11		/* Return address register column */
+	.uleb128 0x1		/* Augmentation length and data */
+	.byte 0x1b              /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc, 0xf, 0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
+
+	.align 2
+.LCIE1_end:
+
+	.ualong	.LFDE0_end-.LFDE0_start	/* Length FDE0 */
+.LFDE0_start:
+	.ualong	.LFDE0_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_sigreturn-.LSTART_sigreturn
+	.uleb128 0			/* Augmentation */
+	.align 2
+.LFDE0_end:
+
+	.ualong	.LFDE1_end-.LFDE1_start	/* Length FDE1 */
+.LFDE1_start:
+	.ualong	.LFDE1_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_rt_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_rt_sigreturn-.LSTART_rt_sigreturn
+	.uleb128 0			/* Augmentation */
+	.align 2
+.LFDE1_end:
+
 	.previous
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3e70f85..0eb74d0 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -3,37 +3,34 @@
 	.type __kernel_vsyscall,@function
 __kernel_vsyscall:
 .LSTART_vsyscall:
-	/* XXX: We'll have to do something here once we opt to use the vDSO
-	 * page for something other than the signal trampoline.. as well as
-	 * fill out .eh_frame -- PFM. */
+	trapa	#0x10
+	 nop
 .LEND_vsyscall:
 	.size __kernel_vsyscall,.-.LSTART_vsyscall
+	.previous
 
 	.section .eh_frame,"a",@progbits
-	.previous
 .LCIE:
 	.ualong	.LCIE_end - .LCIE_start
 .LCIE_start:
 	.ualong	0		/* CIE ID */
 	.byte	0x1		/* Version number */
-	.string	"zRS"		/* NUL-terminated augmentation string */
+	.string	"zR"		/* NUL-terminated augmentation string */
 	.uleb128 0x1		/* Code alignment factor */
 	.sleb128 -4		/* Data alignment factor */
 	.byte	0x11		/* Return address register column */
-				/* Augmentation length and data (none) */
-	.byte	0xc		/* DW_CFA_def_cfa */
-	.uleb128 0xf		/* r15 */
-	.uleb128 0x0		/* offset 0 */
-
+	.uleb128 0x1		/* Augmentation length and data */
+	.byte 0x1b              /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc,0xf,0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
 	.align 2
 .LCIE_end:
 
 	.ualong	.LFDE_end-.LFDE_start	/* Length FDE */
 .LFDE_start:
-	.ualong	.LCIE			/* CIE pointer */
-	.ualong	.LSTART_vsyscall-.	/* start address */
+	.ualong	.LFDE_start-.LCIE	/* CIE pointer */
+	.ualong	.LSTART_vsyscall-.	/* PC-relative start address */
 	.ualong	.LEND_vsyscall-.LSTART_vsyscall
-	.uleb128 0
+	.uleb128 0			/* Augmentation */
 	.align 2
 .LFDE_end:
 	.previous
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 112fea1..0e52928 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -18,6 +18,7 @@
 #include <linux/highmem.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
 #include <asm/cacheflush.h>
 
 /*
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index f251b5f..b81d9db 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -33,7 +33,8 @@
 fs_initcall(dma_init);
 
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp)
+				 dma_addr_t *dma_handle, gfp_t gfp,
+				 struct dma_attrs *attrs)
 {
 	void *ret, *ret_nocache;
 	int order = get_order(size);
@@ -64,7 +65,8 @@
 }
 
 void dma_generic_free_coherent(struct device *dev, size_t size,
-			       void *vaddr, dma_addr_t dma_handle)
+			       void *vaddr, dma_addr_t dma_handle,
+			       struct dma_attrs *attrs)
 {
 	int order = get_order(size);
 	unsigned long pfn = dma_handle >> PAGE_SHIFT;
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 75a17f5..0b85dd9 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
index bc156ec..2d8fa71 100644
--- a/arch/sh/mm/sram.c
+++ b/arch/sh/mm/sram.c
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/errno.h>
 #include <asm/sram.h>
 
 /*
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 9205416..d56d199 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -5,7 +5,6 @@
 
 ROOT_IMG	:= /usr/src/root.img
 ELFTOAOUT	:= elftoaout
-MKIMAGE 	:= $(srctree)/scripts/mkuboot.sh
 
 hostprogs-y	:= piggyback btfixupprep
 targets		:= tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
@@ -92,11 +91,9 @@
 $(obj)/image.gz: $(obj)/image.bin
 	$(call if_changed,gzip)
 
-quiet_cmd_uimage = UIMAGE  $@
-      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sparc -O linux -T kernel \
-               -C gzip -a $(CONFIG_UBOOT_LOAD_ADDR) \
-	       -e $(CONFIG_UBOOT_ENTRY_ADDR) -n 'Linux-$(KERNELRELEASE)' \
-               -d $< $@
+UIMAGE_LOADADDR = $(CONFIG_UBOOT_LOAD_ADDR)
+UIMAGE_ENTRYADDR = $(CONFIG_UBOOT_ENTRY_ADDR)
+UIMAGE_COMPRESSION = gzip
 
 quiet_cmd_uimage.o = UIMAGE.O $@
       cmd_uimage.o = $(LD) -Tdata $(CONFIG_UBOOT_FLASH_ADDR) \
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 8c0e4f7..48a7c65 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -26,24 +26,30 @@
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *cpu_addr;
 
-	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 	return cpu_addr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 6fa2f79..76e4a52 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -12,8 +12,6 @@
  * the SpitFire page tables.
  */
 
-#include <asm-generic/pgtable-nopud.h>
-
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <asm/types.h>
@@ -22,6 +20,8 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
+#include <asm-generic/pgtable-nopud.h>
+
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
  * The TSB is mapped in the 0x8000000 to 0xa000000 range.
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index ef8c7c0..fd9c3f2 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -165,6 +165,7 @@
 #ifdef __KERNEL__
 
 #include <linux/threads.h>
+#include <asm/switch_to.h>
 
 static inline int pt_regs_trap_type(struct pt_regs *regs)
 {
@@ -240,6 +241,7 @@
 #ifndef __ASSEMBLY__
 
 #ifdef __KERNEL__
+#include <asm/switch_to.h>
 
 static inline bool pt_regs_is_syscall(struct pt_regs *regs)
 {
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 4643d68..070ed14 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -280,7 +280,8 @@
 }
 
 static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t *dma_addrp, gfp_t gfp)
+				   dma_addr_t *dma_addrp, gfp_t gfp,
+				   struct dma_attrs *attrs)
 {
 	unsigned long flags, order, first_page;
 	struct iommu *iommu;
@@ -330,7 +331,8 @@
 }
 
 static void dma_4u_free_coherent(struct device *dev, size_t size,
-				 void *cpu, dma_addr_t dvma)
+				 void *cpu, dma_addr_t dvma,
+				 struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	unsigned long flags, order, npages;
@@ -825,8 +827,8 @@
 }
 
 static struct dma_map_ops sun4u_dma_ops = {
-	.alloc_coherent		= dma_4u_alloc_coherent,
-	.free_coherent		= dma_4u_free_coherent,
+	.alloc			= dma_4u_alloc_coherent,
+	.free			= dma_4u_free_coherent,
 	.map_page		= dma_4u_map_page,
 	.unmap_page		= dma_4u_unmap_page,
 	.map_sg			= dma_4u_map_sg,
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index d0479e2..21bd739 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -261,7 +261,8 @@
  * CPU may access them without any explicit flushing.
  */
 static void *sbus_alloc_coherent(struct device *dev, size_t len,
-				 dma_addr_t *dma_addrp, gfp_t gfp)
+				 dma_addr_t *dma_addrp, gfp_t gfp,
+				 struct dma_attrs *attrs)
 {
 	struct platform_device *op = to_platform_device(dev);
 	unsigned long len_total = PAGE_ALIGN(len);
@@ -315,7 +316,7 @@
 }
 
 static void sbus_free_coherent(struct device *dev, size_t n, void *p,
-			       dma_addr_t ba)
+			       dma_addr_t ba, struct dma_attrs *attrs)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -407,8 +408,8 @@
 }
 
 struct dma_map_ops sbus_dma_ops = {
-	.alloc_coherent		= sbus_alloc_coherent,
-	.free_coherent		= sbus_free_coherent,
+	.alloc			= sbus_alloc_coherent,
+	.free			= sbus_free_coherent,
 	.map_page		= sbus_map_page,
 	.unmap_page		= sbus_unmap_page,
 	.map_sg			= sbus_map_sg,
@@ -436,7 +437,8 @@
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
-				  dma_addr_t *pba, gfp_t gfp)
+				  dma_addr_t *pba, gfp_t gfp,
+				  struct dma_attrs *attrs)
 {
 	unsigned long len_total = PAGE_ALIGN(len);
 	void *va;
@@ -489,7 +491,7 @@
  * past this call are illegal.
  */
 static void pci32_free_coherent(struct device *dev, size_t n, void *p,
-				dma_addr_t ba)
+				dma_addr_t ba, struct dma_attrs *attrs)
 {
 	struct resource *res;
 
@@ -645,8 +647,8 @@
 }
 
 struct dma_map_ops pci32_dma_ops = {
-	.alloc_coherent		= pci32_alloc_coherent,
-	.free_coherent		= pci32_free_coherent,
+	.alloc			= pci32_alloc_coherent,
+	.free			= pci32_free_coherent,
 	.map_page		= pci32_map_page,
 	.unmap_page		= pci32_unmap_page,
 	.map_sg			= pci32_map_sg,
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 971fd43..48565c1 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -6,6 +6,8 @@
 #include <linux/jump_label.h>
 #include <linux/memory.h>
 
+#include <asm/cacheflush.h>
+
 #ifdef HAVE_JUMP_LABEL
 
 void arch_jump_label_transform(struct jump_entry *entry,
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 768290a..c875955 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -7,6 +7,7 @@
 #include <linux/kdebug.h>
 #include <linux/ftrace.h>
 
+#include <asm/cacheflush.h>
 #include <asm/kdebug.h>
 #include <asm/ptrace.h>
 #include <asm/irq.h>
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index a19c8a0..35e4367 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -104,11 +104,11 @@
 {
 	cpumask_t mask;
 
-	cpus_and(mask, cpu_online_map, *affinity);
-	if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
+	cpumask_and(&mask, cpu_online_mask, affinity);
+	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
 		return boot_cpu_id;
 	else
-		return first_cpu(mask);
+		return cpumask_first(&mask);
 }
 #else
 #define irq_choose_cpu(affinity) boot_cpu_id
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index aba6b95..19f5605 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -45,7 +45,6 @@
 
 void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
 {
-	struct leon_pci_info *info = pbus->sysdata;
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
 	u16 cmd;
@@ -111,18 +110,6 @@
 	return pci_enable_resources(dev, mask);
 }
 
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
-	/*
-	 * Currently the OpenBoot nodes are not connected with the PCI device,
-	 * this is because the LEON PROM does not create PCI nodes. Eventually
-	 * this will change and the same approach as pcic.c can be used to
-	 * match PROM nodes with pci devices.
-	 */
-	return NULL;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
 void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
 {
 #ifdef CONFIG_PCI_DEBUG
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index af5755d..7661e84 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -128,7 +128,8 @@
 }
 
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t *dma_addrp, gfp_t gfp)
+				   dma_addr_t *dma_addrp, gfp_t gfp,
+				   struct dma_attrs *attrs)
 {
 	unsigned long flags, order, first_page, npages, n;
 	struct iommu *iommu;
@@ -198,7 +199,7 @@
 }
 
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
-				 dma_addr_t dvma)
+				 dma_addr_t dvma, struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -527,8 +528,8 @@
 }
 
 static struct dma_map_ops sun4v_dma_ops = {
-	.alloc_coherent			= dma_4v_alloc_coherent,
-	.free_coherent			= dma_4v_free_coherent,
+	.alloc				= dma_4v_alloc_coherent,
+	.free				= dma_4v_free_coherent,
 	.map_page			= dma_4v_map_page,
 	.unmap_page			= dma_4v_unmap_page,
 	.map_sg				= dma_4v_map_sg,
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 1333879..540b2fe 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -14,6 +14,7 @@
 #include <asm/sbi.h>
 #include <asm/mmu.h>
 #include <asm/tlbflush.h>
+#include <asm/switch_to.h>
 #include <asm/cacheflush.h>
 
 #include "kernel.h"
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 5947686..02db9a0 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -10,6 +10,7 @@
 #include <linux/cpu.h>
 
 #include <asm/cacheflush.h>
+#include <asm/switch_to.h>
 #include <asm/tlbflush.h>
 
 #include "irq.h"
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7705c67..df3155a 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -225,6 +225,8 @@
 	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int fault, code;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	if(text_fault)
 		address = regs->pc;
@@ -251,6 +253,7 @@
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	/*
@@ -289,7 +292,11 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -297,13 +304,29 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 504c062..1fe0429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -279,6 +279,7 @@
 	unsigned int insn = 0;
 	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	fault_code = get_thread_fault_code();
 
@@ -333,6 +334,8 @@
 			insn = get_fault_insn(regs, insn);
 			goto handle_kernel_fault;
 		}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -423,7 +426,12 @@
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -431,12 +439,27 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 	up_read(&mm->mmap_sem);
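[Editor's note] Both sparc fault handlers above adopt the same retry protocol: request FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, and on VM_FAULT_RETRY clear ALLOW_RETRY and repeat, remembering that mmap_sem has already been dropped by __lock_page_or_retry(). The sketch below is a simplified, hypothetical handler showing only that control flow; VM_FAULT_ERROR handling, signal delivery, and perf accounting are omitted.

#include <linux/mm.h>
#include <linux/sched.h>

static void example_do_fault(struct mm_struct *mm, unsigned long address,
			     int write)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address)
		goto out;	/* a real handler raises SIGSEGV here */

	fault = handle_mm_fault(mm, vma, address, flags);

	/* On VM_FAULT_RETRY mmap_sem has already been released for us;
	 * bail out straight away if a fatal signal is pending. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (fault & VM_FAULT_RETRY) {
		/* Retry exactly once, without ALLOW_RETRY; no up_read()
		 * here for the same reason as above. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
out:
	up_read(&mm->mmap_sem);
}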
 
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 11270ca..96033e2 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -12,7 +12,7 @@
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
 	select SYS_HYPERVISOR
-	select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -69,6 +69,9 @@
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 
+config NEED_DMA_MAP_STATE
+	def_bool y
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -118,7 +121,7 @@
 
 config ARCH_DEFCONFIG
 	string
-	default "arch/tile/configs/tile_defconfig" if !TILEGX
+	default "arch/tile/configs/tilepro_defconfig" if !TILEGX
 	default "arch/tile/configs/tilegx_defconfig" if TILEGX
 
 source "init/Kconfig"
@@ -240,6 +243,7 @@
 
 config PAGE_OFFSET
 	hex
+	depends on !64BIT
 	default 0xF0000000 if VMSPLIT_3_75G
 	default 0xE0000000 if VMSPLIT_3_5G
 	default 0xB0000000 if VMSPLIT_2_75G
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 17acce7..9520bc5 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -30,7 +30,8 @@
 KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
 endif
 
-LIBGCC_PATH     := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC_PATH     := \
+  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 # Provide the path to use for "make defconfig".
 KBUILD_DEFCONFIG := $(ARCH)_defconfig
@@ -53,8 +54,6 @@
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y		+= arch/tile/
 
-core-$(CONFIG_KVM) += arch/tile/kvm/
-
 ifdef TILERA_ROOT
 INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
 endif
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index f548efe..d6ba449 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -60,8 +60,8 @@
 	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
 #define SPR_IPI_EVENT_RESET_K \
 	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_SET_K \
-	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_SET_K \
+	_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
 #define INT_IPI_K \
 	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
 
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index bb696da..f246142 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -17,6 +17,8 @@
 #ifndef _ASM_TILE_ATOMIC_H
 #define _ASM_TILE_ATOMIC_H
 
+#include <asm/cmpxchg.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -121,54 +123,6 @@
  */
 #define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
 
-/* Nonexistent functions intended to cause link errors. */
-extern unsigned long __xchg_called_with_bad_pointer(void);
-extern unsigned long __cmpxchg_called_with_bad_pointer(void);
-
-#define xchg(ptr, x)							\
-	({								\
-		typeof(*(ptr)) __x;					\
-		switch (sizeof(*(ptr))) {				\
-		case 4:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
-				(atomic_t *)(ptr),			\
-				(u32)(typeof((x)-(x)))(x));		\
-			break;						\
-		case 8:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
-				(atomic64_t *)(ptr),			\
-				(u64)(typeof((x)-(x)))(x));		\
-			break;						\
-		default:						\
-			__xchg_called_with_bad_pointer();		\
-		}							\
-		__x;							\
-	})
-
-#define cmpxchg(ptr, o, n)						\
-	({								\
-		typeof(*(ptr)) __x;					\
-		switch (sizeof(*(ptr))) {				\
-		case 4:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
-				(atomic_t *)(ptr),			\
-				(u32)(typeof((o)-(o)))(o),		\
-				(u32)(typeof((n)-(n)))(n));		\
-			break;						\
-		case 8:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
-				(atomic64_t *)(ptr),			\
-				(u64)(typeof((o)-(o)))(o),		\
-				(u64)(typeof((n)-(n)))(n));		\
-			break;						\
-		default:						\
-			__cmpxchg_called_with_bad_pointer();		\
-		}							\
-		__x;							\
-	})
-
-#define tas(ptr) (xchg((ptr), 1))
-
 #endif /* __ASSEMBLY__ */
 
 #ifndef __tilegx__
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 466dc4a..54d1da8 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -200,7 +200,7 @@
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index 58d021a..60b87ee 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -38,10 +38,10 @@
 
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-	unsigned long old, mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
-	old = *addr;
+	oldval = *addr;
 	do {
 		guess = oldval;
 		oldval = atomic64_cmpxchg((atomic64_t *)addr,
@@ -85,7 +85,7 @@
 				      volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval = *addr;
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
 	oldval = *addr;
 	do {
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
new file mode 100644
index 0000000..276f067
--- /dev/null
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/*
+ * cmpxchg.h -- forked from asm/atomic.h with this copyright:
+ *
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ */
+
+#ifndef _ASM_TILE_CMPXCHG_H
+#define _ASM_TILE_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+/* Nonexistent functions intended to cause link errors. */
+extern unsigned long __xchg_called_with_bad_pointer(void);
+extern unsigned long __cmpxchg_called_with_bad_pointer(void);
+
+#define xchg(ptr, x)							\
+	({								\
+		typeof(*(ptr)) __x;					\
+		switch (sizeof(*(ptr))) {				\
+		case 4:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
+				(atomic_t *)(ptr),			\
+				(u32)(typeof((x)-(x)))(x));		\
+			break;						\
+		case 8:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
+				(atomic64_t *)(ptr),			\
+				(u64)(typeof((x)-(x)))(x));		\
+			break;						\
+		default:						\
+			__xchg_called_with_bad_pointer();		\
+		}							\
+		__x;							\
+	})
+
+#define cmpxchg(ptr, o, n)						\
+	({								\
+		typeof(*(ptr)) __x;					\
+		switch (sizeof(*(ptr))) {				\
+		case 4:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
+				(atomic_t *)(ptr),			\
+				(u32)(typeof((o)-(o)))(o),		\
+				(u32)(typeof((n)-(n)))(n));		\
+			break;						\
+		case 8:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
+				(atomic64_t *)(ptr),			\
+				(u64)(typeof((o)-(o)))(o),		\
+				(u64)(typeof((n)-(n)))(n));		\
+			break;						\
+		default:						\
+			__cmpxchg_called_with_bad_pointer();		\
+		}							\
+		__x;							\
+	})
+
+#define tas(ptr) (xchg((ptr), 1))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_TILE_CMPXCHG_H */
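[Editor's note] The xchg()/cmpxchg() macros moved into the new header above dispatch on sizeof(*(ptr)) to the 32- or 64-bit atomic primitives. A tiny usage sketch, assuming kernel context and an illustrative lock word (not part of the patch):

#include <linux/atomic.h>
#include <linux/types.h>

static u32 lock_word;

/* tas() expands to xchg(&lock_word, 1); zero means we took the lock. */
static int try_take(void)
{
	return tas(&lock_word) == 0;
}

/* cmpxchg() returns the old value; success means it matched 'expect'. */
static int advance_if(u32 expect, u32 next)
{
	return cmpxchg(&lock_word, expect, next) == expect;
}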
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index f80f8ce..33cff9a 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -21,7 +21,7 @@
 #define NR_IRQS 32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 1
+#define IRQ_RESCHEDULE 0
 
 #define irq_canonicalize(irq)   (irq)
 
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 72be590..5f8b6a0 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -137,7 +137,7 @@
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__insn_mf();
-	rw->lock = 0;
+	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 4d97a2d..0e9d382a 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;     /* task we are backtracing */
-	pte_t *pgtable;		      /* page table for user space access */
 	int end;		      /* iteration complete. */
 	int new_context;              /* new context is starting */
 	int profile;                  /* profiling, so stop on async intrpt */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 5f20f92..e28c3df4 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -64,7 +64,11 @@
 
 
 #ifdef __tilegx__
+/* kernel/single_step.c */
 void gx_singlestep_handle(struct pt_regs *, int fault_num);
+
+/* kernel/intvec_64.S */
+void fill_ra_stack(void);
 #endif
 
-#endif /* _ASM_TILE_SYSCALLS_H */
+#endif /* _ASM_TILE_TRAPS_H */
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 431e9ae6..ec91568 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -85,6 +85,7 @@
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	j smp_nap /* we are not architecturally guaranteed not to exit nap */
 	jrp lr    /* clue in the backtracer */
 	STD_ENDPROC(smp_nap)
@@ -105,5 +106,6 @@
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	jrp lr
 	STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index aecc8ed..5d56a1e 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -799,6 +799,10 @@
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1237,7 +1241,10 @@
 	bzt     r30, 1f
 	jal	do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+1:	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Linvalid_syscall:
 	/* Report an invalid syscall back to the user program */
@@ -1246,7 +1253,10 @@
 	 movei  r28, -ENOSYS
 	}
 	sw      r29, r28
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1262,7 +1272,10 @@
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 	/*
@@ -1376,7 +1389,10 @@
 
 	jal     send_sigtrap    /* issue a SIGTRAP */
 	FEEDBACK_REENTER(handle_ill)
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Ldispatch_normal_ill:
 	{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 79c93e1..49d9d66 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -22,6 +22,7 @@
 #include <asm/irqflags.h>
 #include <asm/asm-offsets.h>
 #include <asm/types.h>
+#include <asm/signal.h>
 #include <hv/hypervisor.h>
 #include <arch/abi.h>
 #include <arch/interrupts.h>
@@ -605,6 +606,10 @@
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1039,11 +1044,28 @@
 
 	/* Do syscall trace again, if requested. */
 	ld	r30, r31
-	andi    r30, r30, _TIF_SYSCALL_TRACE
-	beqzt	r30, 1f
+	andi    r0, r30, _TIF_SYSCALL_TRACE
+	{
+	 andi    r0, r30, _TIF_SINGLESTEP
+	 beqzt   r0, 1f
+	}
 	jal	do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	andi    r0, r30, _TIF_SINGLESTEP
+
+1:	beqzt	r0, 2f
+
+	/* Single stepping -- notify ptrace. */
+	{
+	 movei   r0, SIGTRAP
+	 jal     ptrace_notify
+	}
+	FEEDBACK_REENTER(handle_syscall)
+
+2:	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Lcompat_syscall:
 	/*
@@ -1077,7 +1099,10 @@
 	 movei  r28, -ENOSYS
 	}
 	st      r29, r28
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1093,7 +1118,10 @@
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 /* Various stub interrupt handlers and syscall handlers */
@@ -1156,6 +1184,18 @@
 	push_extra_callee_saves r0
 	j       do_trap
 
+/* Fill the return address stack with nonzero entries. */
+STD_ENTRY(fill_ra_stack)
+	{
+	 move	r0, lr
+	 jal	1f
+	}
+1:	jal	2f
+2:	jal	3f
+3:	jal	4f
+4:	jrp	r0
+	STD_ENDPROC(fill_ra_stack)
+
 /* Include .intrpt1 array of interrupt vectors */
 	.section ".intrpt1", "ax"
 
@@ -1166,7 +1206,7 @@
 #define do_hardwall_trap bad_intr
 #endif
 
-	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
+	int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
 	int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
 #if CONFIG_KERNEL_PL == 2
 	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index b90ab99..98d4769 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -67,6 +67,8 @@
 	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
 	if (!area)
 		goto error;
+	area->nr_pages = npages;
+	area->pages = pages;
 
 	if (map_vm_area(area, prot_rwx, &pages)) {
 		vunmap(area->addr);
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 7a93270..446a7f5 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -146,7 +146,6 @@
 	},
 	{}
 };
-#endif
 
 static struct ctl_path tile_path[] = {
 	{ .procname = "tile" },
@@ -155,10 +154,9 @@
 
 static int __init proc_sys_tile_init(void)
 {
-#ifndef __tilegx__  /* FIXME: GX: no support for unaligned access yet */
 	register_sysctl_paths(tile_path, unaligned_table);
-#endif
 	return 0;
 }
 
 arch_initcall(proc_sys_tile_init);
+#endif
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 30caeca..2d5ef61 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -28,6 +28,7 @@
 #include <linux/tracehook.h>
 #include <linux/signal.h>
 #include <asm/stack.h>
+#include <asm/switch_to.h>
 #include <asm/homecache.h>
 #include <asm/syscalls.h>
 #include <asm/traps.h>
@@ -285,7 +286,7 @@
 	static struct task_struct corrupt = { .comm = "<corrupt>" };
 	struct task_struct *tsk = current;
 	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
-		     (void *)tsk > high_memory ||
+		     (high_memory && (void *)tsk > high_memory) ||
 		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
 		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
 		tsk = &corrupt;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 5f85d8b..bff23f4 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -103,13 +103,11 @@
 
 static int __init setup_maxmem(char *str)
 {
-	long maxmem_mb;
-	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
-	    maxmem_mb == 0)
+	unsigned long long maxmem;
+	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
 		return -EINVAL;
 
-	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
-		(HPAGE_SHIFT - PAGE_SHIFT);
+	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used to no more than %dMB\n",
 	       maxmem_pfn >> (20 - PAGE_SHIFT));
 	return 0;
@@ -119,14 +117,15 @@
 static int __init setup_maxnodemem(char *str)
 {
 	char *endp;
-	long maxnodemem_mb, node;
+	unsigned long long maxnodemem;
+	long node;
 
 	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
-	if (node >= MAX_NUMNODES || *endp != ':' ||
-	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
+	if (node >= MAX_NUMNODES || *endp != ':')
 		return -EINVAL;
 
-	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
+	maxnodemem = memparse(endp+1, NULL);
+	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
 	       node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
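
The two hunks above switch the maxmem=/maxnodemem= options from a megabyte count to memparse(), which takes a byte count with an optional size suffix. A minimal userspace sketch of that suffix handling follows, under the assumption that only the K/M/G suffixes matter for these options.

#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_mem(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {		/* suffix scales the value, as memparse() does */
	case 'G': case 'g': v <<= 30; break;
	case 'M': case 'm': v <<= 20; break;
	case 'K': case 'k': v <<= 10; break;
	}
	return v;
}

int main(void)
{
	printf("%llu\n", parse_mem("512M"));	/* 536870912 */
	printf("%llu\n", parse_mem("2G"));	/* 2147483648 */
	return 0;
}
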
@@ -913,6 +912,13 @@
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
+/*
+ * Note that the kernel can potentially support other compression
+ * techniques than gz, though we don't do so by default.  If we ever
+ * decide to do so we can either look for other filename extensions,
+ * or just allow a file with this name to be compressed with an
+ * arbitrary compressor (somewhat counterintuitively).
+ */
 static int __initdata set_initramfs_file;
 static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
 
@@ -928,9 +934,9 @@
 early_param("initramfs_file", setup_initramfs_file);
 
 /*
- * We look for an additional "initramfs.cpio.gz" file in the hvfs.
+ * We look for an "initramfs.cpio.gz" file in the hvfs.
  * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs after any built-in initramfs_data.
+ * unpacked to the initramfs.
  */
 static void __init load_hv_initrd(void)
 {
@@ -1100,7 +1106,7 @@
 
 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
- * flush on our behalf.  It is set to cpu_possible_map OR'ed with
+ * flush on our behalf.  It is set to cpu_possible_mask OR'ed with
  * hash_for_home_map, and it is what should be passed to
  * hv_flush_remote() to flush all caches.  Note that if there are
  * dedicated hypervisor driver tiles that have authorized use of their
@@ -1186,7 +1192,7 @@
 			      sizeof(cpu_lotar_map));
 	if (rc < 0) {
 		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
-		cpu_lotar_map = cpu_possible_map;
+		cpu_lotar_map = *cpu_possible_mask;
 	}
 
 #if CHIP_HAS_CBOX_HOME_MAP()
@@ -1196,9 +1202,9 @@
 			      sizeof(hash_for_home_map));
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
-	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
+	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
 #else
-	cpu_cacheable_map = cpu_possible_map;
+	cpu_cacheable_map = *cpu_possible_mask;
 #endif
 }
 
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index bc1eb58..9efbc13 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -153,6 +153,25 @@
 	if (((unsigned long)addr % size) == 0)
 		return bundle;
 
+	/*
+	 * Return SIGBUS with the unaligned address, if requested.
+	 * Note that we return SIGBUS even for completely invalid addresses
+	 * as long as they are in fact unaligned; this matches what the
+	 * tilepro hardware would be doing, if it could provide us with the
+	 * actual bad address in an SPR, which it doesn't.
+	 */
+	if (unaligned_fixup == 0) {
+		siginfo_t info = {
+			.si_signo = SIGBUS,
+			.si_code = BUS_ADRALN,
+			.si_addr = addr
+		};
+		trace_unhandled_signal("unaligned trap", regs,
+				       (unsigned long)addr, SIGBUS);
+		force_sig_info(info.si_signo, &info, current);
+		return (tilepro_bundle_bits) 0;
+	}
+
 #ifndef __LITTLE_ENDIAN
 # error We assume little-endian representation with copy_xx_user size 2 here
 #endif
@@ -192,18 +211,6 @@
 		return (tile_bundle_bits) 0;
 	}
 
-	if (unaligned_fixup == 0) {
-		siginfo_t info = {
-			.si_signo = SIGBUS,
-			.si_code = BUS_ADRALN,
-			.si_addr = addr
-		};
-		trace_unhandled_signal("unaligned trap", regs,
-				       (unsigned long)addr, SIGBUS);
-		force_sig_info(info.si_signo, &info, current);
-		return (tile_bundle_bits) 0;
-	}
-
 	if (unaligned_printk || unaligned_fixup_count == 0) {
 		pr_info("Process %d/%s: PC %#lx: Fixup of"
 			" unaligned %s at %#lx.\n",
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index a44e103..91da0f7 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -103,7 +103,7 @@
 	set_cpu_online(smp_processor_id(), 0);
 	arch_local_irq_disable_all();
 	for (;;)
-		asm("nap");
+		asm("nap; nop");
 }
 
 /* This function calls the 'stop' function on all other CPUs in the system. */
@@ -113,6 +113,12 @@
 	send_IPI_allbutself(MSG_TAG_STOP_CPU);
 }
 
+/* On panic, just wait; we may get an smp_send_stop() later on. */
+void panic_smp_self_stop(void)
+{
+	while (1)
+		asm("nap; nop");
+}
 
 /*
  * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index b949edc..172aef7 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -196,6 +196,8 @@
 	/* This must be done before setting cpu_online_mask */
 	wmb();
 
+	notify_cpu_starting(smp_processor_id());
+
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 37ee4d0..b2f44c2 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -21,10 +21,12 @@
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/mmzone.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
 #include <asm/ucontext.h>
+#include <asm/switch_to.h>
 #include <asm/sigframe.h>
 #include <asm/stack.h>
 #include <arch/abi.h>
@@ -44,72 +46,23 @@
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address valid for reading? */
-static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
-{
-	HV_PTE *l1_pgtable = kbt->pgtable;
-	HV_PTE *l2_pgtable;
-	unsigned long pfn;
-	HV_PTE pte;
-	struct page *page;
-
-	if (l1_pgtable == NULL)
-		return 0;	/* can't read user space in other tasks */
-
-#ifdef CONFIG_64BIT
-	/* Find the real l1_pgtable by looking in the l0_pgtable. */
-	pte = l1_pgtable[HV_L0_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-	page = pfn_to_page(pfn);
-	BUG_ON(PageHighMem(page));  /* No HIGHMEM on 64-bit. */
-	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-#endif
-	pte = l1_pgtable[HV_L1_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-
-	page = pfn_to_page(pfn);
-	if (PageHighMem(page)) {
-		pr_err("L2 page table not in LOWMEM (%#llx)\n",
-		       HV_PFN_TO_CPA(pfn));
-		return 0;
-	}
-	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-	pte = l2_pgtable[HV_L2_INDEX(address)];
-	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-}
-
 /* Callback for backtracer; basically a glorified memcpy */
 static bool read_memory_func(void *result, unsigned long address,
 			     unsigned int size, void *vkbt)
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
+
+	if (address == 0)
+		return 0;
 	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
 		if (!in_kernel_stack(kbt, address))
 			return 0;
-	} else if (!valid_address(kbt, address)) {
-		return 0;	/* invalid user-space address */
+	} else if (!kbt->is_current) {
+		return 0;	/* can't read from other user address spaces */
 	}
 	pagefault_disable();
 	retval = __copy_from_user_inatomic(result,
@@ -127,6 +80,8 @@
 	unsigned long sp = kbt->it.sp;
 	struct pt_regs *p;
 
+	if (sp % sizeof(long) != 0)
+		return NULL;
 	if (!in_kernel_stack(kbt, sp))
 		return NULL;
 	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@@ -169,27 +124,27 @@
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
+static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
+				      struct rt_sigframe* kframe)
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (b->pc == VDSO_BASE) {
-		struct rt_sigframe *frame;
-		unsigned long sigframe_top =
-			b->sp + sizeof(struct rt_sigframe) - 1;
-		if (!valid_address(kbt, b->sp) ||
-		    !valid_address(kbt, sigframe_top)) {
-			if (kbt->verbose)
-				pr_err("  (odd signal: sp %#lx?)\n",
-				       (unsigned long)(b->sp));
+	if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
+	    b->sp % sizeof(long) == 0) {
+		int retval;
+		pagefault_disable();
+		retval = __copy_from_user_inatomic(
+			kframe, (void __user __force *)b->sp,
+			sizeof(*kframe));
+		pagefault_enable();
+		if (retval != 0 ||
+		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
 			return NULL;
-		}
-		frame = (struct rt_sigframe *)b->sp;
 		if (kbt->verbose) {
 			pr_err("  <received signal %d>\n",
-			       frame->info.si_signo);
+			       kframe->info.si_signo);
 		}
-		return (struct pt_regs *)&frame->uc.uc_mcontext;
+		return (struct pt_regs *)&kframe->uc.uc_mcontext;
 	}
 	return NULL;
 }
@@ -202,10 +157,11 @@
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
+	struct rt_sigframe kframe;
 
 	p = valid_fault_handler(kbt);
 	if (p == NULL)
-		p = valid_sigframe(kbt);
+		p = valid_sigframe(kbt, &kframe);
 	if (p == NULL)
 		return 0;
 	backtrace_init(&kbt->it, read_memory_func, kbt,
@@ -265,41 +221,19 @@
 
 	/*
 	 * Set up callback information.  We grab the kernel stack base
-	 * so we will allow reads of that address range, and if we're
-	 * asking about the current process we grab the page table
-	 * so we can check user accesses before trying to read them.
-	 * We flush the TLB to avoid any weird skew issues.
+	 * so we will allow reads of that address range.
 	 */
-	is_current = (t == NULL);
+	is_current = (t == NULL || t == current);
 	kbt->is_current = is_current;
 	if (is_current)
 		t = validate_current();
 	kbt->task = t;
-	kbt->pgtable = NULL;
 	kbt->verbose = 0;   /* override in caller if desired */
 	kbt->profile = 0;   /* override in caller if desired */
 	kbt->end = KBT_ONGOING;
-	kbt->new_context = 0;
-	if (is_current) {
-		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
-		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
-			/*
-			 * Not just an optimization: this also allows
-			 * this to work at all before va/pa mappings
-			 * are set up.
-			 */
-			kbt->pgtable = swapper_pg_dir;
-		} else {
-			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
-			if (!PageHighMem(page))
-				kbt->pgtable = __va(pgdir_pa);
-			else
-				pr_err("page table not in LOWMEM"
-				       " (%#llx)\n", pgdir_pa);
-		}
-		local_flush_tlb_all();
+	kbt->new_context = 1;
+	if (is_current)
 		validate_stack(regs);
-	}
 
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
@@ -345,6 +279,78 @@
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
+static void describe_addr(struct KBacktraceIterator *kbt,
+			  unsigned long address,
+			  int have_mmap_sem, char *buf, size_t bufsize)
+{
+	struct vm_area_struct *vma;
+	size_t namelen, remaining;
+	unsigned long size, offset, adjust;
+	char *p, *modname;
+	const char *name;
+	int rc;
+
+	/*
+	 * Look one byte back for every caller frame (i.e. those that
+	 * aren't a new context) so we look up symbol data for the
+	 * call itself, not the following instruction, which may be on
+	 * a different line (or in a different function).
+	 */
+	adjust = !kbt->new_context;
+	address -= adjust;
+
+	if (address >= PAGE_OFFSET) {
+		/* Handle kernel symbols. */
+		BUG_ON(bufsize < KSYM_NAME_LEN);
+		name = kallsyms_lookup(address, &size, &offset,
+				       &modname, buf);
+		if (name == NULL) {
+			buf[0] = '\0';
+			return;
+		}
+		namelen = strlen(buf);
+		remaining = (bufsize - 1) - namelen;
+		p = buf + namelen;
+		rc = snprintf(p, remaining, "+%#lx/%#lx ",
+			      offset + adjust, size);
+		if (modname && rc < remaining)
+			snprintf(p + rc, remaining - rc, "[%s] ", modname);
+		buf[bufsize-1] = '\0';
+		return;
+	}
+
+	/* If we don't have the mmap_sem, we can't show any more info. */
+	buf[0] = '\0';
+	if (!have_mmap_sem)
+		return;
+
+	/* Find vma info. */
+	vma = find_vma(kbt->task->mm, address);
+	if (vma == NULL || address < vma->vm_start) {
+		snprintf(buf, bufsize, "[unmapped address] ");
+		return;
+	}
+
+	if (vma->vm_file) {
+		char *s;
+		p = d_path(&vma->vm_file->f_path, buf, bufsize);
+		if (IS_ERR(p))
+			p = "?";
+		s = strrchr(p, '/');
+		if (s)
+			p = s+1;
+	} else {
+		p = "anon";
+	}
+
+	/* Generate a string description of the vma info. */
+	namelen = strlen(p);
+	remaining = (bufsize - 1) - namelen;
+	memmove(buf, p, namelen);
+	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
+		 vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
@@ -353,6 +359,7 @@
 void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 {
 	int i;
+	int have_mmap_sem = 0;
 
 	if (headers) {
 		/*
@@ -369,31 +376,16 @@
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
-		char *modname;
-		const char *name;
-		unsigned long address = kbt->it.pc;
-		unsigned long offset, size;
 		char namebuf[KSYM_NAME_LEN+100];
+		unsigned long address = kbt->it.pc;
 
-		if (address >= PAGE_OFFSET)
-			name = kallsyms_lookup(address, &size, &offset,
-					       &modname, namebuf);
-		else
-			name = NULL;
+		/* Try to acquire the mmap_sem as we pass into userspace. */
+		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+			have_mmap_sem =
+				down_read_trylock(&kbt->task->mm->mmap_sem);
 
-		if (!name)
-			namebuf[0] = '\0';
-		else {
-			size_t namelen = strlen(namebuf);
-			size_t remaining = (sizeof(namebuf) - 1) - namelen;
-			char *p = namebuf + namelen;
-			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
-					  offset, size);
-			if (modname && rc < remaining)
-				snprintf(p + rc, remaining - rc,
-					 "[%s] ", modname);
-			namebuf[sizeof(namebuf)-1] = '\0';
-		}
+		describe_addr(kbt, address, have_mmap_sem,
+			      namebuf, sizeof(namebuf));
 
 		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
@@ -408,6 +400,8 @@
 		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
+	if (have_mmap_sem)
+		up_read(&kbt->task->mm->mmap_sem);
 }
 EXPORT_SYMBOL(tile_show_stack);
 
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 2bb6602..73cff81 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -200,7 +200,7 @@
 {
 	siginfo_t info = { 0 };
 	int signo, code;
-	unsigned long address;
+	unsigned long address = 0;
 	bundle_bits instr;
 
 	/* Re-enable interrupts. */
@@ -223,6 +223,10 @@
 	}
 
 	switch (fault_num) {
+	case INT_MEM_ERROR:
+		signo = SIGBUS;
+		code = BUS_OBJERR;
+		break;
 	case INT_ILL:
 		if (copy_from_user(&instr, (void __user *)regs->pc,
 				   sizeof(instr))) {
@@ -289,7 +293,10 @@
 		address = regs->pc;
 		break;
 #ifdef __tilegx__
-	case INT_ILL_TRANS:
+	case INT_ILL_TRANS: {
+		/* Avoid a hardware erratum with the return address stack. */
+		fill_ra_stack();
+
 		signo = SIGSEGV;
 		code = SEGV_MAPERR;
 		if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
@@ -297,6 +304,7 @@
 		else
 			address = 0;  /* FIXME: GX: single-step for address */
 		break;
+	}
 #endif
 	default:
 		panic("Unexpected do_trap interrupt number %d", fault_num);
@@ -308,7 +316,8 @@
 	info.si_addr = (void __user *)address;
 	if (signo == SIGILL)
 		info.si_trapno = fault_num;
-	trace_unhandled_signal("trap", regs, address, signo);
+	if (signo != SIGTRAP)
+		trace_unhandled_signal("trap", regs, address, signo);
 	force_sig_info(signo, &info, current);
 }
 
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 0c26086..985f598 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -7,6 +7,7 @@
 	strchr_$(BITS).o strlen_$(BITS).o
 
 ifeq ($(CONFIG_TILEGX),y)
+CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer
 lib-y += memcpy_user_64.o
 else
 lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 8928aac..db4fb89 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -39,7 +39,21 @@
 {
 	char *p, *base;
 	size_t step_size, load_count;
+
+	/*
+	 * On TILEPro the striping granularity is a fixed 8KB; on
+	 * TILE-Gx it is configurable, and we rely on the fact that
+	 * the hypervisor always configures maximum striping, so that
+	 * bits 9 and 10 of the PA are part of the stripe function,
+	 * which means that every 512 bytes we hit a striping
+	 * boundary.
+	 */

+#ifdef __tilegx__
+	const unsigned long STRIPE_WIDTH = 512;
+#else
 	const unsigned long STRIPE_WIDTH = 8192;
+#endif
+
 #ifdef __tilegx__
 	/*
 	 * On TILE-Gx, we must disable the dstream prefetcher before doing
@@ -74,7 +88,7 @@
 	 * memory, that one load would be sufficient, but since we may
 	 * be, we also need to back up to the last load issued to
 	 * another memory controller, which would be the point where
-	 * we crossed an 8KB boundary (the granularity of striping
+	 * we crossed a "striping" boundary (the granularity of striping
 	 * across memory controllers).  Keep backing up and doing this
 	 * until we are before the beginning of the buffer, or have
 	 * hit all the controllers.
@@ -88,12 +102,22 @@
 	 * every cache line on a full memory stripe on each
 	 * controller" that we simply do that, to simplify the logic.
 	 *
-	 * FIXME: See bug 9535 for some issues with this code.
+	 * On TILE-Gx the hash-for-home function is much more complex,
+	 * with the upshot being we can't readily guarantee we have
+	 * hit both entries in the 128-entry AMT that were hit by any
+	 * load in the entire range, so we just re-load them all.
+	 * With larger buffers, we may want to consider using a hypervisor
+	 * trap to issue loads directly to each hash-for-home tile for
+	 * each controller (doing it from Linux would trash the TLB).
 	 */
 	if (hfh) {
 		step_size = L2_CACHE_BYTES;
+#ifdef __tilegx__
+		load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
+#else
 		load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
 			      (1 << CHIP_LOG_NUM_MSHIMS());
+#endif
 	} else {
 		step_size = STRIPE_WIDTH;
 		load_count = (1 << CHIP_LOG_NUM_MSHIMS());
@@ -109,7 +133,7 @@
 
 	/* Figure out how far back we need to go. */
 	base = p - (step_size * (load_count - 2));
-	if ((long)base < (long)buffer)
+	if ((unsigned long)base < (unsigned long)buffer)
 		base = buffer;
 
 	/*
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 4763b3a..37440ca 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -14,7 +14,13 @@
  * Do memcpy(), but trap and return "n" when a load or store faults.
  *
  * Note: this idiom only works when memcpy() compiles to a leaf function.
- * If "sp" is updated during memcpy, the "jrp lr" will be incorrect.
+ * Here a leaf function means not only that it makes no calls, but also
+ * that it performs no stack operations (sp, stack frame pointer) and
+ * uses no callee-saved registers; otherwise "jrp lr" would be incorrect,
+ * since unwinding of the stack frame is bypassed.  memcpy() is simple
+ * enough that these conditions hold here, but we need to be careful
+ * when modifying this file.  This is not a clean solution, but it is
+ * the best one so far.
  *
  * Also note that we are capturing "n" from the containing scope here.
  */
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h
index c101098..6ac3750 100644
--- a/arch/tile/lib/spinlock_common.h
+++ b/arch/tile/lib/spinlock_common.h
@@ -60,5 +60,5 @@
 	loops += __insn_crc32_32(stack_pointer, get_cycles_low()) &
 		(loops - 1);
 
-	relax(1 << exponent);
+	relax(loops);
 }
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index cba30e9..22e58f5 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -130,7 +130,7 @@
 }
 
 /*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area.
  */
 static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
 {
@@ -203,9 +203,14 @@
  * interrupt or a critical region, and must do as little as possible.
  * Similarly, we can't use atomic ops here, since we may be handling a
  * fault caused by an atomic op access.
+ *
+ * If we find a migrating PTE while we're in an NMI context, and we're
+ * at a PC that has a registered exception handler, we don't wait,
+ * since this thread may (e.g.) have been interrupted while migrating
+ * its own stack, which would then cause us to self-deadlock.
  */
 static int handle_migrating_pte(pgd_t *pgd, int fault_num,
-				unsigned long address,
+				unsigned long address, unsigned long pc,
 				int is_kernel_mode, int write)
 {
 	pud_t *pud;
@@ -227,6 +232,8 @@
 		pte_offset_kernel(pmd, address);
 	pteval = *pte;
 	if (pte_migrating(pteval)) {
+		if (in_nmi() && search_exception_tables(pc))
+			return 0;
 		wait_for_migration(pte);
 		return 1;
 	}
@@ -300,7 +307,7 @@
 	 * rather than trying to patch up the existing PTE.
 	 */
 	pgd = get_current_pgd();
-	if (handle_migrating_pte(pgd, fault_num, address,
+	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
 				 is_kernel_mode, write))
 		return 1;
 
@@ -335,9 +342,12 @@
 	/*
 	 * If we're trying to touch user-space addresses, we must
 	 * be either at PL0, or else with interrupts enabled in the
-	 * kernel, so either way we can re-enable interrupts here.
+	 * kernel, so either way we can re-enable interrupts here
+	 * unless we are doing atomic access to user space with
+	 * interrupts disabled.
 	 */
-	local_irq_enable();
+	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
+		local_irq_enable();
 
 	mm = tsk->mm;
 
@@ -665,7 +675,7 @@
 	 */
 	if (fault_num == INT_DTLB_ACCESS)
 		write = 1;
-	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
+	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
 		return state;
 
 	/* Return zero so that we continue on with normal fault handling. */
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 1cc6ae4..499f737 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -394,6 +394,7 @@
 		return pte_to_home(*virt_to_pte(NULL, kva));
 	}
 }
+EXPORT_SYMBOL(page_home);
 
 void homecache_change_page_home(struct page *page, int order, int home)
 {
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c490..6a9d20d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,11 +254,6 @@
 		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
 	}
 
-	/* As a performance optimization, keep the boot init stack here. */
-	if (address >= (ulong)&init_thread_union &&
-	    address < (ulong)&init_thread_union + THREAD_SIZE)
-		return construct_pgprot(PAGE_KERNEL, smp_processor_id());
-
 #ifndef __tilegx__
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
 	/* Force the atomic_locks[] array page to be hash-for-home. */
@@ -557,6 +552,7 @@
 
 	address = MEM_SV_INTRPT;
 	pmd = get_pmd(pgtables, address);
+	pfn = 0;  /* code starts at PA 0 */
 	if (ktext_small) {
 		/* Allocate an L2 PTE for the kernel text */
 		int cpu = 0;
@@ -579,10 +575,15 @@
 		}
 
 		BUG_ON(address != (unsigned long)_stext);
-		pfn = 0;  /* code starts at PA 0 */
-		pte = alloc_pte();
-		for (pte_ofs = 0; address < (unsigned long)_einittext;
-		     pfn++, pte_ofs++, address += PAGE_SIZE) {
+		pte = NULL;
+		for (; address < (unsigned long)_einittext;
+		     pfn++, address += PAGE_SIZE) {
+			pte_ofs = pte_index(address);
+			if (pte_ofs == 0) {
+				if (pte)
+					assign_pte(pmd++, pte);
+				pte = alloc_pte();
+			}
 			if (!ktext_local) {
 				prot = set_remote_cache_cpu(prot, cpu);
 				cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +592,8 @@
 			}
 			pte[pte_ofs] = pfn_pte(pfn, prot);
 		}
-		assign_pte(pmd, pte);
+		if (pte)
+			assign_pte(pmd, pte);
 	} else {
 		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
 		pteval = pte_mkhuge(pteval);
@@ -614,7 +616,9 @@
 		else
 			pteval = hv_pte_set_mode(pteval,
 						 HV_PTE_MODE_CACHE_NO_L3);
-		*(pte_t *)pmd = pteval;
+		for (; address < (unsigned long)_einittext;
+		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
 	}
 
 	/* Set swapper_pgprot here so it is flushed to memory right away. */
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 8730369..2410aa8 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@
 	if (!pmd_huge_page(*pmd))
 		return;
 
-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}
 
@@ -194,6 +190,7 @@
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif
 
 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@
 		     cpu_possible_mask, NULL, 0);
 
 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }
 
 /*
@@ -217,9 +215,13 @@
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
  */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
@@ -469,10 +471,18 @@
 
 void set_pte(pte_t *ptep, pte_t pte)
 {
-	struct page *page = pfn_to_page(pte_pfn(pte));
-
-	/* Update the home of a PTE if necessary */
-	pte = pte_set_home(pte, page_home(page));
+	if (pte_present(pte) &&
+	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
+		/* The PTE actually references physical memory. */
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			/* Update the home of the PTE from the struct page. */
+			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
+		} else if (hv_pte_get_mode(pte) == 0) {
+			/* remap_pfn_range(), etc, must supply PTE mode. */
+			panic("set_pte(): out-of-range PFN and mode 0\n");
+		}
+	}
 
 	__set_pte(ptep, pte);
 }
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index dc36b22..6673508 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -3,41 +3,6 @@
 
 #include <asm/types.h>
 
-#if defined(__KERNEL__)
-
-# include <asm/byteorder.h>
-
-# if defined(__BIG_ENDIAN)
-#	define ntohll(x) (x)
-#	define htonll(x) (x)
-# elif defined(__LITTLE_ENDIAN)
-#	define ntohll(x)  be64_to_cpu(x)
-#	define htonll(x)  cpu_to_be64(x)
-# else
-#	error "Could not determine byte order"
-# endif
-
-#else
-/* For the definition of ntohl, htonl and __BYTE_ORDER */
-#include <endian.h>
-#include <netinet/in.h>
-#if defined(__BYTE_ORDER)
-
-#  if __BYTE_ORDER == __BIG_ENDIAN
-#	define ntohll(x) (x)
-#	define htonll(x) (x)
-#  elif __BYTE_ORDER == __LITTLE_ENDIAN
-#	define ntohll(x)  bswap_64(x)
-#	define htonll(x)  bswap_64(x)
-#  else
-#	error "Could not determine byte order: __BYTE_ORDER uncorrectly defined"
-#  endif
-
-#else  /* ! defined(__BYTE_ORDER) */
-#	error "Could not determine byte order: __BYTE_ORDER not defined"
-#endif
-#endif /* ! defined(__KERNEL__) */
-
 extern int init_cow_file(int fd, char *cow_file, char *backing_file,
 			 int sectorsize, int alignment, int *bitmap_offset_out,
 			 unsigned long *bitmap_len_out, int *data_offset_out);
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c
index 9cbb426..0ee9cc6 100644
--- a/arch/um/drivers/cow_user.c
+++ b/arch/um/drivers/cow_user.c
@@ -8,11 +8,10 @@
  * that.
  */
 #include <unistd.h>
-#include <byteswap.h>
 #include <errno.h>
 #include <string.h>
 #include <arpa/inet.h>
-#include <asm/types.h>
+#include <endian.h>
 #include "cow.h"
 #include "cow_sys.h"
 
@@ -214,8 +213,8 @@
 			   "header\n");
 		goto out;
 	}
-	header->magic = htonl(COW_MAGIC);
-	header->version = htonl(COW_VERSION);
+	header->magic = htobe32(COW_MAGIC);
+	header->version = htobe32(COW_VERSION);
 
 	err = -EINVAL;
 	if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@@ -246,10 +245,10 @@
 		goto out_free;
 	}
 
-	header->mtime = htonl(modtime);
-	header->size = htonll(*size);
-	header->sectorsize = htonl(sectorsize);
-	header->alignment = htonl(alignment);
+	header->mtime = htobe32(modtime);
+	header->size = htobe64(*size);
+	header->sectorsize = htobe32(sectorsize);
+	header->alignment = htobe32(alignment);
 	header->cow_format = COW_BITMAP;
 
 	err = cow_write_file(fd, header, sizeof(*header));
@@ -301,8 +300,8 @@
 	magic = header->v1.magic;
 	if (magic == COW_MAGIC)
 		version = header->v1.version;
-	else if (magic == ntohl(COW_MAGIC))
-		version = ntohl(header->v1.version);
+	else if (magic == be32toh(COW_MAGIC))
+		version = be32toh(header->v1.version);
 	/* No error printed because the non-COW case comes through here */
 	else goto out;
 
@@ -327,9 +326,9 @@
 				   "header\n");
 			goto out;
 		}
-		*mtime_out = ntohl(header->v2.mtime);
-		*size_out = ntohll(header->v2.size);
-		*sectorsize_out = ntohl(header->v2.sectorsize);
+		*mtime_out = be32toh(header->v2.mtime);
+		*size_out = be64toh(header->v2.size);
+		*sectorsize_out = be32toh(header->v2.sectorsize);
 		*bitmap_offset_out = sizeof(header->v2);
 		*align_out = *sectorsize_out;
 		file = header->v2.backing_file;
@@ -341,10 +340,10 @@
 				   "header\n");
 			goto out;
 		}
-		*mtime_out = ntohl(header->v3.mtime);
-		*size_out = ntohll(header->v3.size);
-		*sectorsize_out = ntohl(header->v3.sectorsize);
-		*align_out = ntohl(header->v3.alignment);
+		*mtime_out = be32toh(header->v3.mtime);
+		*size_out = be64toh(header->v3.size);
+		*sectorsize_out = be32toh(header->v3.sectorsize);
+		*align_out = be32toh(header->v3.alignment);
 		if (*align_out == 0) {
 			cow_printf("read_cow_header - invalid COW header, "
 				   "align == 0\n");
@@ -366,16 +365,16 @@
 		 * this was used until Dec2005 - 64bits are needed to represent
 		 * 2038+. I.e. we can safely do this truncating cast.
 		 *
-		 * Additionally, we must use ntohl() instead of ntohll(), since
+		 * Additionally, we must use be32toh() instead of be64toh(), since
 		 * the program used to use the former (tested - I got mtime
 		 * mismatch "0 vs whatever").
 		 *
 		 * Ever heard about bug-to-bug-compatibility ? ;-) */
-		*mtime_out = (time32_t) ntohl(header->v3_b.mtime);
+		*mtime_out = (time32_t) be32toh(header->v3_b.mtime);
 
-		*size_out = ntohll(header->v3_b.size);
-		*sectorsize_out = ntohl(header->v3_b.sectorsize);
-		*align_out = ntohl(header->v3_b.alignment);
+		*size_out = be64toh(header->v3_b.size);
+		*sectorsize_out = be32toh(header->v3_b.sectorsize);
+		*align_out = be32toh(header->v3_b.alignment);
 		if (*align_out == 0) {
 			cow_printf("read_cow_header - invalid COW header, "
 				   "align == 0\n");
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e672bd6..43b39d6 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 
 #include "init.h"
 #include "irq_kern.h"
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 8419f5c..fff2435 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,3 +1,4 @@
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
+generic-y += switch_to.h
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 492bc4c..65a1c3d 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,9 +3,10 @@
 # Licensed under the GPL
 #
 
-CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
-                        -DELF_ARCH=$(LDS_ELF_ARCH)        \
-                        -DELF_FORMAT=$(LDS_ELF_FORMAT)
+CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START)		\
+                        -DELF_ARCH=$(LDS_ELF_ARCH)	\
+                        -DELF_FORMAT=$(LDS_ELF_FORMAT)	\
+			$(LDS_EXTRA)
 extra-y := vmlinux.lds
 clean-files :=
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f386d04..2b73ded 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -88,11 +88,8 @@
 
 extern void arch_switch_to(struct task_struct *to);
 
-void *_switch_to(void *prev, void *next, void *last)
+void *__switch_to(struct task_struct *from, struct task_struct *to)
 {
-	struct task_struct *from = prev;
-	struct task_struct *to = next;
-
 	to->thread.prev_sched = from;
 	set_current(to);
 
@@ -111,7 +108,6 @@
 	} while (current->thread.saved_task);
 
 	return current->thread.prev_sched;
-
 }
 
 void interrupt_end(void)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 4947b31..0a49ef0 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -103,7 +103,6 @@
 
 void uml_setup_stubs(struct mm_struct *mm)
 {
-	struct page **pages;
 	int err, ret;
 
 	if (!skas_needs_stub)
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 2e9852c..0a9e57e 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -41,7 +41,7 @@
 	cpu_tasks[0].pid = pid;
 	cpu_tasks[0].task = current;
 #ifdef CONFIG_SMP
-	cpu_online_map = cpumask_of_cpu(0);
+	init_cpu_online(get_cpu_mask(0));
 #endif
 	start_kernel();
 	return 0;
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 155206a..6f588e1 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -76,7 +76,7 @@
 		cpu_relax();
 
 	notify_cpu_starting(cpu);
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	default_idle();
 	return 0;
 }
@@ -110,8 +110,7 @@
 	for (i = 0; i < ncpus; ++i)
 		set_cpu_possible(i, true);
 
-	cpu_clear(me, cpu_online_map);
-	cpu_set(me, cpu_online_map);
+	set_cpu_online(me, true);
 	cpu_set(me, cpu_callin_map);
 
 	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
@@ -138,13 +137,13 @@
 
 void smp_prepare_boot_cpu(void)
 {
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 }
 
 int __cpu_up(unsigned int cpu)
 {
 	cpu_set(cpu, smp_commenced_mask);
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		mb();
 	return 0;
 }
diff --git a/arch/unicore32/boot/Makefile b/arch/unicore32/boot/Makefile
index 79e5f88..ec7fb70 100644
--- a/arch/unicore32/boot/Makefile
+++ b/arch/unicore32/boot/Makefile
@@ -11,8 +11,6 @@
 # Copyright (C) 2001~2010 GUAN Xue-tao
 #
 
-MKIMAGE := $(srctree)/scripts/mkuboot.sh
-
 targets := Image zImage uImage
 
 $(obj)/Image: vmlinux FORCE
@@ -26,14 +24,8 @@
 	$(call if_changed,objcopy)
 	@echo '  Kernel: $@ is ready'
 
-quiet_cmd_uimage = UIMAGE  $@
-      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A unicore -O linux -T kernel \
-		   -C none -a $(LOADADDR) -e $(STARTADDR) \
-		   -n 'Linux-$(KERNELRELEASE)' -d $< $@
-
-$(obj)/uImage: LOADADDR=0x0
-
-$(obj)/uImage: STARTADDR=$(LOADADDR)
+UIMAGE_ARCH = unicore
+UIMAGE_LOADADDR = 0x0
 
 $(obj)/uImage: $(obj)/zImage FORCE
 	$(call if_changed,uimage)
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index 9258e59..366460a 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -82,20 +82,26 @@
 	return 0;
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	return dma_ops->alloc(dev, size, dma_handle, flag, attrs);
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
index bfa9fbb..16c08b2 100644
--- a/arch/unicore32/mm/dma-swiotlb.c
+++ b/arch/unicore32/mm/dma-swiotlb.c
@@ -17,9 +17,23 @@
 
 #include <asm/dma.h>
 
+static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
+					    dma_addr_t *dma_handle, gfp_t flags,
+					    struct dma_attrs *attrs)
+{
+	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+}
+
+static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
+					  void *vaddr, dma_addr_t dma_addr,
+					  struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 struct dma_map_ops swiotlb_dma_map_ops = {
-	.alloc_coherent = swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = unicore_swiotlb_alloc_coherent,
+	.free = unicore_swiotlb_free_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.dma_supported = swiotlb_dma_supported,
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 968dbe2..41a7237 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -129,6 +129,7 @@
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # prevent gcc from generating any FP code by mistake
 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
 
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 4be406a..36b62bc 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -14,6 +14,9 @@
 
 export LDFLAGS
 
+LDS_EXTRA		:= -Ui386
+export LDS_EXTRA
+
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index b3b7332..99480e5 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -43,7 +43,7 @@
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
 			asm volatile (lock #op "b %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : "+q" (__ret), "+m" (*(ptr))	\
 				      : : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_W:					\
@@ -173,7 +173,7 @@
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
 			asm volatile (lock "addb %b1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "+m" (*(ptr)) : "qi" (inc)	\
 				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_W:					\
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index ed3065f..4b4331d 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -59,7 +59,8 @@
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_addr, gfp_t flag);
+					dma_addr_t *dma_addr, gfp_t flag,
+					struct dma_attrs *attrs);
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
@@ -111,9 +112,11 @@
        return gfp;
 }
 
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
 static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp)
+dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
@@ -129,18 +132,21 @@
 	if (!is_device_dma_capable(dev))
 		return NULL;
 
-	if (!ops->alloc_coherent)
+	if (!ops->alloc)
 		return NULL;
 
-	memory = ops->alloc_coherent(dev, size, dma_handle,
-				     dma_alloc_coherent_gfp_flags(dev, gfp));
+	memory = ops->alloc(dev, size, dma_handle,
+			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t bus)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t bus,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -150,8 +156,8 @@
 		return;
 
 	debug_dma_free_coherent(dev, size, vaddr, bus);
-	if (ops->free_coherent)
-		ops->free_coherent(dev, size, vaddr, bus);
+	if (ops->free)
+		ops->free(dev, size, vaddr, bus, attrs);
 }
 
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7284c9a..4fa7dcc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -974,16 +974,6 @@
 #define cpu_has_amd_erratum(x)	(false)
 #endif /* CONFIG_CPU_SUP_AMD */
 
-#ifdef CONFIG_X86_32
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-#endif
-
-void disable_hlt(void);
-void enable_hlt(void);
-
 void cpu_idle_wait(void);
 
 extern unsigned long arch_align_stack(unsigned long sp);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8be5f54..e054459 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -557,6 +557,8 @@
 
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+extern __must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
 
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803..8084bc73 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,11 +213,6 @@
 	return n;
 }
 
-long __must_check strncpy_from_user(char *dst, const char __user *src,
-				    long count);
-long __must_check __strncpy_from_user(char *dst,
-				      const char __user *src, long count);
-
 /**
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30..fcd4b6f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,10 +208,6 @@
 	}
 }
 
-__must_check long
-strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
-__strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long strnlen_user(const char __user *str, long n);
 __must_check long __strnlen_user(const char __user *str, long n);
 __must_check long strlen_user(const char __user *str);
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..6fe6767
--- /dev/null
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+	return mask*0x0001020304050608ul >> 56;
+}
+
+#else	/* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	long a = (0x0ff0001+mask) >> 23;
+	/* Fix the 1 for 00 case */
+	return a & mask;
+}
+
+#endif
+
+#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
+
+/* Return the high bit set in the first byte that is a zero */
+static inline unsigned long has_zero(unsigned long a)
+{
+	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
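
The has_zero()/count_masked_bytes() pair in the new header is what the word-at-a-time strncpy_from_user() in this merge uses to test eight bytes for a NUL at once. The following userspace sketch of the idea assumes a 64-bit little-endian host and a buffer padded out to a word multiple so the word-sized loads stay in bounds.

#include <stdio.h>
#include <string.h>

#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))

static inline unsigned long has_zero(unsigned long a)
{
	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
}

static inline long count_masked_bytes(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;
}

/* strlen() one word at a time: load 8 bytes, test them all at once. */
static size_t word_strlen(const char *s)
{
	size_t res = 0;

	for (;;) {
		unsigned long c, bits;

		memcpy(&c, s + res, sizeof(c));
		bits = has_zero(c);
		if (bits) {
			/* Keep only the bits below the first zero byte and
			 * shift so the mask covers whole bytes, mirroring the
			 * massaging the generic strncpy_from_user() does
			 * before calling count_masked_bytes(). */
			bits = (bits - 1) & ~bits;
			bits >>= 7;
			return res + count_masked_bytes(bits);
		}
		res += sizeof(c);
	}
}

int main(void)
{
	char buf[16] = "hello, world";		/* zero-padded to a word multiple */

	printf("%zu\n", word_strlen(buf));	/* prints 12 */
	return 0;
}
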
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 0f42c2f..a415b1f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -642,6 +642,7 @@
 	kfree(buffer.pointer);
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
+	lapic = NULL;
 
 	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
 		goto out;
@@ -650,7 +651,7 @@
 		goto free_tmp_map;
 
 	cpumask_copy(tmp_map, cpu_present_mask);
-	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
+	acpi_register_lapic(physid, ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index b1e7c7f..e6631120 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -477,7 +477,7 @@
 /* allocate and map a coherent mapping */
 static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
-		    gfp_t flag)
+		    gfp_t flag, struct dma_attrs *attrs)
 {
 	dma_addr_t paddr;
 	unsigned long align_mask;
@@ -500,7 +500,8 @@
 		}
 		__free_pages(page, get_order(size));
 	} else
-		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
+						  attrs);
 
 	return NULL;
 }
@@ -508,7 +509,7 @@
 /* free a coherent mapping */
 static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
-		   dma_addr_t dma_addr)
+		   dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -700,8 +701,8 @@
 	.unmap_sg			= gart_unmap_sg,
 	.map_page			= gart_map_page,
 	.unmap_page			= gart_unmap_page,
-	.alloc_coherent			= gart_alloc_coherent,
-	.free_coherent			= gart_free_coherent,
+	.alloc				= gart_alloc_coherent,
+	.free				= gart_free_coherent,
 	.mapping_error			= gart_mapping_error,
 };
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 40883ff..bb8e034 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1313,6 +1313,11 @@
 	pr_info("no hardware sampling interrupt available.\n");
 }
 
+static struct attribute_group x86_pmu_format_group = {
+	.name = "format",
+	.attrs = NULL,
+};
+
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
@@ -1387,6 +1392,7 @@
 	}
 
 	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
 	pr_info("... version:                %d\n",     x86_pmu.version);
 	pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
@@ -1615,6 +1621,9 @@
 {
 	int idx = event->hw.idx;
 
+	if (!x86_pmu.attr_rdpmc)
+		return 0;
+
 	if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
 		idx -= X86_PMC_IDX_FIXED;
 		idx |= 1 << 30;
@@ -1667,6 +1676,7 @@
 
 static const struct attribute_group *x86_pmu_attr_groups[] = {
 	&x86_pmu_attr_group,
+	&x86_pmu_format_group,
 	NULL,
 };
 
@@ -1698,14 +1708,19 @@
 	.flush_branch_stack	= x86_pmu_flush_branch_stack,
 };
 
-void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
+	userpg->cap_usr_time = 0;
+	userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+	userpg->pmc_width = x86_pmu.cntval_bits;
+
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		return;
 
 	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 		return;
 
+	userpg->cap_usr_time = 1;
 	userpg->time_mult = this_cpu_read(cyc2ns);
 	userpg->time_shift = CYC2NS_SCALE_FACTOR;
 	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8484e77..6638aaf 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -339,6 +339,7 @@
 	 * sysfs attrs
 	 */
 	int		attr_rdpmc;
+	struct attribute **format_attrs;
 
 	/*
 	 * CPU Hotplug hooks
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dd002fa..95e7fe1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -404,6 +404,21 @@
 	}
 }
 
+PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
+PMU_FORMAT_ATTR(umask,	"config:8-15"	);
+PMU_FORMAT_ATTR(edge,	"config:18"	);
+PMU_FORMAT_ATTR(inv,	"config:23"	);
+PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
+
+static struct attribute *amd_format_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask.attr,
+	NULL,
+};
+
 static __initconst const struct x86_pmu amd_pmu = {
 	.name			= "AMD",
 	.handle_irq		= x86_pmu_handle_irq,
@@ -426,6 +441,8 @@
 	.get_event_constraints	= amd_get_event_constraints,
 	.put_event_constraints	= amd_put_event_constraints,
 
+	.format_attrs		= amd_format_attr,
+
 	.cpu_prepare		= amd_pmu_cpu_prepare,
 	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
@@ -596,6 +613,7 @@
 	.cpu_dead		= amd_pmu_cpu_dead,
 #endif
 	.cpu_starting		= amd_pmu_cpu_starting,
+	.format_attrs		= amd_format_attr,
 };
 
 __init int amd_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6a84e7f..26b3e2f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1431,6 +1431,24 @@
 	}
 }
 
+PMU_FORMAT_ATTR(event,	"config:0-7"	);
+PMU_FORMAT_ATTR(umask,	"config:8-15"	);
+PMU_FORMAT_ATTR(edge,	"config:18"	);
+PMU_FORMAT_ATTR(pc,	"config:19"	);
+PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
+PMU_FORMAT_ATTR(inv,	"config:23"	);
+PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
+
+static struct attribute *intel_arch_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_pc.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask.attr,
+	NULL,
+};
+
 static __initconst const struct x86_pmu core_pmu = {
 	.name			= "core",
 	.handle_irq		= x86_pmu_handle_irq,
@@ -1455,6 +1473,7 @@
 	.put_event_constraints	= intel_put_event_constraints,
 	.event_constraints	= intel_core_event_constraints,
 	.guest_get_msrs		= core_guest_get_msrs,
+	.format_attrs		= intel_arch_formats_attr,
 };
 
 struct intel_shared_regs *allocate_shared_regs(int cpu)
@@ -1553,6 +1572,21 @@
 		intel_pmu_lbr_reset();
 }
 
+PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
+
+static struct attribute *intel_arch3_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_pc.attr,
+	&format_attr_any.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask.attr,
+
+	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
+	NULL,
+};
+
 static __initconst const struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1576,6 +1610,8 @@
 	.get_event_constraints	= intel_get_event_constraints,
 	.put_event_constraints	= intel_put_event_constraints,
 
+	.format_attrs		= intel_arch3_formats_attr,
+
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index ef484d9..a2dfacf 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1271,6 +1271,17 @@
 	return num ? -EINVAL : 0;
 }
 
+PMU_FORMAT_ATTR(cccr, "config:0-31" );
+PMU_FORMAT_ATTR(escr, "config:32-62");
+PMU_FORMAT_ATTR(ht,   "config:63"   );
+
+static struct attribute *intel_p4_formats_attr[] = {
+	&format_attr_cccr.attr,
+	&format_attr_escr.attr,
+	&format_attr_ht.attr,
+	NULL,
+};
+
 static __initconst const struct x86_pmu p4_pmu = {
 	.name			= "Netburst P4/Xeon",
 	.handle_irq		= p4_pmu_handle_irq,
@@ -1305,6 +1316,8 @@
 	 * the former idea is taken from OProfile code
 	 */
 	.perfctr_second_write	= 1,
+
+	.format_attrs		= intel_p4_formats_attr,
 };
 
 __init int p4_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index c7181be..32bcfc7 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -87,6 +87,23 @@
 	(void)checking_wrmsrl(hwc->config_base, val);
 }
 
+PMU_FORMAT_ATTR(event,	"config:0-7"	);
+PMU_FORMAT_ATTR(umask,	"config:8-15"	);
+PMU_FORMAT_ATTR(edge,	"config:18"	);
+PMU_FORMAT_ATTR(pc,	"config:19"	);
+PMU_FORMAT_ATTR(inv,	"config:23"	);
+PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
+
+static struct attribute *intel_p6_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_pc.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask.attr,
+	NULL,
+};
+
 static __initconst const struct x86_pmu p6_pmu = {
 	.name			= "p6",
 	.handle_irq		= x86_pmu_handle_irq,
@@ -115,6 +132,8 @@
 	.cntval_mask		= (1ULL << 32) - 1,
 	.get_event_constraints	= x86_get_event_constraints,
 	.event_constraints	= p6_event_constraints,
+
+	.format_attrs		= intel_p6_formats_attr,
 };
 
 __init int p6_pmu_init(void)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7943e0c..3dafc60 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -282,8 +282,13 @@
 		else if (!(warned++))
 			set_affinity = 0;
 
+		/*
+		 * We unmask if the irq was not marked masked by the
+		 * core code. That respects the lazy irq disable
+		 * behaviour.
+		 */
 		if (!irqd_can_move_in_process_context(data) &&
-		    !irqd_irq_disabled(data) && chip->irq_unmask)
+		    !irqd_irq_masked(data) && chip->irq_unmask)
 			chip->irq_unmask(data);
 
 		raw_spin_unlock(&desc->lock);
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 90fcf62..1d5d31e 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -68,16 +68,9 @@
 	return count;
 }
 
-static int setup_data_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static const struct file_operations fops_setup_data = {
 	.read		= setup_data_read,
-	.open		= setup_data_open,
+	.open		= simple_open,
 	.llseek		= default_llseek,
 };
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index db6720e..8bfb614 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -43,6 +43,8 @@
 #include <linux/smp.h>
 #include <linux/nmi.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/uaccess.h>
+#include <linux/memory.h>
 
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
@@ -741,6 +743,64 @@
 	regs->ip = ip;
 }
 
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+	char opc[BREAK_INSTR_SIZE];
+
+	bpt->type = BP_BREAKPOINT;
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	err = probe_kernel_write((char *)bpt->bpt_addr,
+				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+#ifdef CONFIG_DEBUG_RODATA
+	if (!err)
+		return err;
+	/*
+	 * It is safe to call text_poke() because normal kernel execution
+	 * is stopped on all cores, so long as the text_mutex is not locked.
+	 */
+	if (mutex_is_locked(&text_mutex))
+		return -EBUSY;
+	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+		  BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+		return -EINVAL;
+	bpt->type = BP_POKE_BREAKPOINT;
+#endif /* CONFIG_DEBUG_RODATA */
+	return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+#ifdef CONFIG_DEBUG_RODATA
+	int err;
+	char opc[BREAK_INSTR_SIZE];
+
+	if (bpt->type != BP_POKE_BREAKPOINT)
+		goto knl_write;
+	/*
+	 * It is safe to call text_poke() because normal kernel execution
+	 * is stopped on all cores, so long as the text_mutex is not locked.
+	 */
+	if (mutex_is_locked(&text_mutex))
+		goto knl_write;
+	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+		goto knl_write;
+	return err;
+knl_write:
+#endif /* CONFIG_DEBUG_RODATA */
+	return probe_kernel_write((char *)bpt->bpt_addr,
+				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+}
+
 struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: */
 	.gdb_bpt_instr		= { 0xcc },
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 694d801..b8ba6e4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -38,6 +38,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
+#include <asm/idle.h>
 
 static int kvmapf = 1;
 
@@ -253,7 +254,10 @@
 		kvm_async_pf_task_wait((u32)read_cr2());
 		break;
 	case KVM_PV_REASON_PAGE_READY:
+		rcu_irq_enter();
+		exit_idle();
 		kvm_async_pf_task_wake((u32)read_cr2());
+		rcu_irq_exit();
 		break;
 	}
 }
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 6ac5782..d0b2fb9 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -430,7 +430,7 @@
 }
 
 static void* calgary_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag)
+	dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -463,7 +463,8 @@
 }
 
 static void calgary_free_coherent(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle)
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	unsigned int npages;
 	struct iommu_table *tbl = find_iommu_table(dev);
@@ -476,8 +477,8 @@
 }
 
 static struct dma_map_ops calgary_dma_ops = {
-	.alloc_coherent = calgary_alloc_coherent,
-	.free_coherent = calgary_free_coherent,
+	.alloc = calgary_alloc_coherent,
+	.free = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 28e5e06f..3003250 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -96,7 +96,8 @@
 	}
 }
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
 	struct page *page;
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 3af4af81..f960506 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -75,7 +75,7 @@
 }
 
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr)
+				dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
@@ -96,8 +96,8 @@
 }
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent		= dma_generic_alloc_coherent,
-	.free_coherent		= nommu_free_coherent,
+	.alloc			= dma_generic_alloc_coherent,
+	.free			= nommu_free_coherent,
 	.map_sg			= nommu_map_sg,
 	.map_page		= nommu_map_page,
 	.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 8f972cb..6c483ba 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -15,21 +15,30 @@
 int swiotlb __read_mostly;
 
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags)
+					dma_addr_t *dma_handle, gfp_t flags,
+					struct dma_attrs *attrs)
 {
 	void *vaddr;
 
-	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
+					   attrs);
 	if (vaddr)
 		return vaddr;
 
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
+static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = x86_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = x86_swiotlb_alloc_coherent,
+	.free = x86_swiotlb_free_coherent,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index a33afaa..1d92a5a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -362,34 +362,10 @@
 EXPORT_SYMBOL(pm_idle);
 #endif
 
-#ifdef CONFIG_X86_32
-/*
- * This halt magic was a workaround for ancient floppy DMA
- * wreckage. It should be safe to remove.
- */
-static int hlt_counter;
-void disable_hlt(void)
-{
-	hlt_counter++;
-}
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	hlt_counter--;
-}
-EXPORT_SYMBOL(enable_hlt);
-
-static inline int hlt_use_halt(void)
-{
-	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
-}
-#else
 static inline int hlt_use_halt(void)
 {
 	return 1;
 }
-#endif
 
 #ifndef CONFIG_SMP
 static inline void play_dead(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ce13315..6e1e406 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -50,6 +50,7 @@
 #include <linux/tboot.h>
 #include <linux/stackprotector.h>
 #include <linux/gfp.h>
+#include <linux/cpuidle.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -1404,7 +1405,8 @@
 	tboot_shutdown(TB_SHUTDOWN_WFS);
 
 	mwait_play_dead();	/* Only returns on failure */
-	hlt_play_dead();
+	if (cpuidle_play_dead())
+		hlt_play_dead();
 }
 
 #else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index e2410e2..6410744 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -272,7 +272,7 @@
 		offsetof(struct acpi_table_facs, firmware_waking_vector);
 }
 
-void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
+static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
 {
 	static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = {
 		/* S0,1,2: */ -1, -1, -1,
@@ -281,7 +281,7 @@
 		/* S5: */ TB_SHUTDOWN_S5 };
 
 	if (!tboot_enabled())
-		return;
+		return 0;
 
 	tboot_copy_fadt(&acpi_gbl_FADT);
 	tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control;
@@ -292,10 +292,11 @@
 	if (sleep_state >= ACPI_S_STATE_COUNT ||
 	    acpi_shutdown_map[sleep_state] == -1) {
 		pr_warning("unsupported sleep state 0x%x\n", sleep_state);
-		return;
+		return -1;
 	}
 
 	tboot_shutdown(acpi_shutdown_map[sleep_state]);
+	return 0;
 }
 
 static atomic_t ap_wfs_count;
@@ -345,6 +346,8 @@
 
 	atomic_set(&ap_wfs_count, 0);
 	register_hotcpu_notifier(&tboot_cpu_notifier);
+
+	acpi_os_set_prepare_sleep(&tboot_sleep);
 	return 0;
 }
 
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index f386dc4..7515cf0e 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -216,9 +216,9 @@
 	current_thread_info()->sig_on_uaccess_error = 1;
 
 	/*
-	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
 	 * 64-bit, so we don't need to special-case it here.  For all the
-	 * vsyscalls, 0 means "don't write anything" not "write it at
+	 * vsyscalls, NULL means "don't write anything" not "write it at
 	 * address 0".
 	 */
 	ret = -EFAULT;
@@ -247,7 +247,7 @@
 
 		ret = sys_getcpu((unsigned __user *)regs->di,
 				 (unsigned __user *)regs->si,
-				 0);
+				 NULL);
 		break;
 	}
 
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a73f0c1..173df38 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -369,7 +369,7 @@
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		if (pmu->fixed_ctr_ctrl == data)
 			return 0;
-		if (!(data & 0xfffffffffffff444)) {
+		if (!(data & 0xfffffffffffff444ull)) {
 			reprogram_fixed_counters(pmu, data);
 			return 0;
 		}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 280751c..ad85adf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3906,7 +3906,9 @@
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 97be9cb..57252c9 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
+#include <asm/word-at-a-time.h>
+
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
  */
@@ -41,3 +43,104 @@
 	return len;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+static inline unsigned long count_bytes(unsigned long mask)
+{
+	mask = (mask - 1) & ~mask;
+	mask >>= 7;
+	return count_masked_bytes(mask);
+}
+
+/*
+ * Do a strncpy, return length of string without final '\0'.
+ * 'count' is the user-supplied count (return 'count' if we
+ * hit it), 'max' is the address space maximum (and we return
+ * -EFAULT if we hit it).
+ */
+static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, long max)
+{
+	long res = 0;
+
+	/*
+	 * Truncate 'max' to the user-specified limit, so that
+	 * we only have one limit we need to check in the loop
+	 */
+	if (max > count)
+		max = count;
+
+	while (max >= sizeof(unsigned long)) {
+		unsigned long c;
+
+		/* Fall back to byte-at-a-time if we get a page fault */
+		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+			break;
+		/* This can write a few bytes past the NUL character, but that's ok */
+		*(unsigned long *)(dst+res) = c;
+		c = has_zero(c);
+		if (c)
+			return res + count_bytes(c);
+		res += sizeof(unsigned long);
+		max -= sizeof(unsigned long);
+	}
+
+	while (max) {
+		char c;
+
+		if (unlikely(__get_user(c,src+res)))
+			return -EFAULT;
+		dst[res] = c;
+		if (!c)
+			return res;
+		res++;
+		max--;
+	}
+
+	/*
+	 * Uhhuh. We hit 'max'. But was that the user-specified maximum
+	 * too? If so, that's ok - we got as much as the user asked for.
+	 */
+	if (res >= count)
+		return count;
+
+	/*
+	 * Nope: we hit the address space limit, and we still had more
+	 * characters the caller would have wanted. That's an EFAULT.
+	 */
+	return -EFAULT;
+}
+
+/**
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @src:   Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	unsigned long max_addr, src_addr;
+
+	if (unlikely(count <= 0))
+		return 0;
+
+	max_addr = current_thread_info()->addr_limit.seg;
+	src_addr = (unsigned long)src;
+	if (likely(src_addr < max_addr)) {
+		unsigned long max = max_addr - src_addr;
+		return do_strncpy_from_user(dst, src, count, max);
+	}
+	return -EFAULT;
+}
+EXPORT_SYMBOL(strncpy_from_user);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index d9b094c..ef2a6a5 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -33,93 +33,6 @@
 	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst, src, count, res)			   \
-do {									   \
-	int __d0, __d1, __d2;						   \
-	might_fault();							   \
-	__asm__ __volatile__(						   \
-		"	testl %1,%1\n"					   \
-		"	jz 2f\n"					   \
-		"0:	lodsb\n"					   \
-		"	stosb\n"					   \
-		"	testb %%al,%%al\n"				   \
-		"	jz 1f\n"					   \
-		"	decl %1\n"					   \
-		"	jnz 0b\n"					   \
-		"1:	subl %1,%0\n"					   \
-		"2:\n"							   \
-		".section .fixup,\"ax\"\n"				   \
-		"3:	movl %5,%0\n"					   \
-		"	jmp 2b\n"					   \
-		".previous\n"						   \
-		_ASM_EXTABLE(0b,3b)					   \
-		: "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	   \
-		  "=&D" (__d2)						   \
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory");						   \
-} while (0)
-
-/**
- * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- * Caller must check the specified block with access_ok() before calling
- * this function.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b7c2849..0d0326f 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,55 +9,6 @@
 #include <asm/uaccess.h>
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst,src,count,res)			   \
-do {									   \
-	long __d0, __d1, __d2;						   \
-	might_fault();							   \
-	__asm__ __volatile__(						   \
-		"	testq %1,%1\n"					   \
-		"	jz 2f\n"					   \
-		"0:	lodsb\n"					   \
-		"	stosb\n"					   \
-		"	testb %%al,%%al\n"				   \
-		"	jz 1f\n"					   \
-		"	decq %1\n"					   \
-		"	jnz 0b\n"					   \
-		"1:	subq %1,%0\n"					   \
-		"2:\n"							   \
-		".section .fixup,\"ax\"\n"				   \
-		"3:	movq %5,%0\n"					   \
-		"	jmp 2b\n"					   \
-		".previous\n"						   \
-		_ASM_EXTABLE(0b,3b)					   \
-		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	   \
-		  "=&D" (__d2)						   \
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory");						   \
-} while (0)
-
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		return __strncpy_from_user(dst, src, count);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 6687022..877b9a1 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -18,17 +18,17 @@
  * r9d : hlen = skb->len - skb->data_len
  */
 #define SKBDATA	%r8
-
-sk_load_word_ind:
-	.globl	sk_load_word_ind
-
-	add	%ebx,%esi	/* offset += X */
-#	test    %esi,%esi	/* if (offset < 0) goto bpf_error; */
-	js	bpf_error
+#define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
 
 sk_load_word:
 	.globl	sk_load_word
 
+	test	%esi,%esi
+	js	bpf_slow_path_word_neg
+
+sk_load_word_positive_offset:
+	.globl	sk_load_word_positive_offset
+
 	mov	%r9d,%eax		# hlen
 	sub	%esi,%eax		# hlen - offset
 	cmp	$3,%eax
@@ -37,16 +37,15 @@
 	bswap   %eax  			/* ntohl() */
 	ret
 
-
-sk_load_half_ind:
-	.globl sk_load_half_ind
-
-	add	%ebx,%esi	/* offset += X */
-	js	bpf_error
-
 sk_load_half:
 	.globl	sk_load_half
 
+	test	%esi,%esi
+	js	bpf_slow_path_half_neg
+
+sk_load_half_positive_offset:
+	.globl	sk_load_half_positive_offset
+
 	mov	%r9d,%eax
 	sub	%esi,%eax		#	hlen - offset
 	cmp	$1,%eax
@@ -55,14 +54,15 @@
 	rol	$8,%ax			# ntohs()
 	ret
 
-sk_load_byte_ind:
-	.globl sk_load_byte_ind
-	add	%ebx,%esi	/* offset += X */
-	js	bpf_error
-
 sk_load_byte:
 	.globl	sk_load_byte
 
+	test	%esi,%esi
+	js	bpf_slow_path_byte_neg
+
+sk_load_byte_positive_offset:
+	.globl	sk_load_byte_positive_offset
+
 	cmp	%esi,%r9d   /* if (offset >= hlen) goto bpf_slow_path_byte */
 	jle	bpf_slow_path_byte
 	movzbl	(SKBDATA,%rsi),%eax
@@ -73,25 +73,21 @@
  *
  * Implements BPF_S_LDX_B_MSH : ldxb  4*([offset]&0xf)
  * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value, already known positive
+ * Inputs : %esi is the offset value
  */
-ENTRY(sk_load_byte_msh)
-	CFI_STARTPROC
+sk_load_byte_msh:
+	.globl	sk_load_byte_msh
+	test	%esi,%esi
+	js	bpf_slow_path_byte_msh_neg
+
+sk_load_byte_msh_positive_offset:
+	.globl	sk_load_byte_msh_positive_offset
 	cmp	%esi,%r9d      /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
 	jle	bpf_slow_path_byte_msh
 	movzbl	(SKBDATA,%rsi),%ebx
 	and	$15,%bl
 	shl	$2,%bl
 	ret
-	CFI_ENDPROC
-ENDPROC(sk_load_byte_msh)
-
-bpf_error:
-# force a return 0 from jit handler
-	xor		%eax,%eax
-	mov		-8(%rbp),%rbx
-	leaveq
-	ret
 
 /* rsi contains offset and can be scratched */
 #define bpf_slow_path_common(LEN)		\
@@ -138,3 +134,67 @@
 	shl	$2,%al
 	xchg	%eax,%ebx
 	ret
+
+#define sk_negative_common(SIZE)				\
+	push	%rdi;	/* save skb */				\
+	push	%r9;						\
+	push	SKBDATA;					\
+/* rsi already has offset */					\
+	mov	$SIZE,%ecx;	/* size */			\
+	call	bpf_internal_load_pointer_neg_helper;		\
+	test	%rax,%rax;					\
+	pop	SKBDATA;					\
+	pop	%r9;						\
+	pop	%rdi;						\
+	jz	bpf_error
+
+
+bpf_slow_path_word_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi	/* test range */
+	jl	bpf_error	/* offset lower -> error  */
+sk_load_word_negative_offset:
+	.globl	sk_load_word_negative_offset
+	sk_negative_common(4)
+	mov	(%rax), %eax
+	bswap	%eax
+	ret
+
+bpf_slow_path_half_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_half_negative_offset:
+	.globl	sk_load_half_negative_offset
+	sk_negative_common(2)
+	mov	(%rax),%ax
+	rol	$8,%ax
+	movzwl	%ax,%eax
+	ret
+
+bpf_slow_path_byte_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_byte_negative_offset:
+	.globl	sk_load_byte_negative_offset
+	sk_negative_common(1)
+	movzbl	(%rax), %eax
+	ret
+
+bpf_slow_path_byte_msh_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_byte_msh_negative_offset:
+	.globl	sk_load_byte_msh_negative_offset
+	xchg	%eax,%ebx /* don't lose A, X is about to be scratched */
+	sk_negative_common(1)
+	movzbl	(%rax),%eax
+	and	$15,%al
+	shl	$2,%al
+	xchg	%eax,%ebx
+	ret
+
+bpf_error:
+# force a return 0 from jit handler
+	xor		%eax,%eax
+	mov		-8(%rbp),%rbx
+	leaveq
+	ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5671752..0597f95 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -30,7 +30,10 @@
  * assembly code in arch/x86/net/bpf_jit.S
  */
 extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
-extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
+extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
 
 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@ -117,6 +120,8 @@
 	set_fs(old_fs);
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 void bpf_jit_compile(struct sk_filter *fp)
 {
@@ -289,7 +294,7 @@
 					EMIT2(0x24, K & 0xFF); /* and imm8,%al */
 				} else if (K >= 0xFFFF0000) {
 					EMIT2(0x66, 0x25);	/* and imm16,%ax */
-					EMIT2(K, 2);
+					EMIT(K, 2);
 				} else {
 					EMIT1_off32(0x25, K);	/* and imm32,%eax */
 				}
@@ -473,44 +478,46 @@
 #endif
 				break;
 			case BPF_S_LD_W_ABS:
-				func = sk_load_word;
+				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 common_load:			seen |= SEEN_DATAREF;
-				if ((int)K < 0) {
-					/* Abort the JIT because __load_pointer() is needed. */
-					goto out;
-				}
 				t_offset = func - (image + addrs[i]);
 				EMIT1_off32(0xbe, K); /* mov imm32,%esi */
 				EMIT1_off32(0xe8, t_offset); /* call */
 				break;
 			case BPF_S_LD_H_ABS:
-				func = sk_load_half;
+				func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 				goto common_load;
 			case BPF_S_LD_B_ABS:
-				func = sk_load_byte;
+				func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 				goto common_load;
 			case BPF_S_LDX_B_MSH:
-				if ((int)K < 0) {
-					/* Abort the JIT because __load_pointer() is needed. */
-					goto out;
-				}
+				func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 				seen |= SEEN_DATAREF | SEEN_XREG;
-				t_offset = sk_load_byte_msh - (image + addrs[i]);
+				t_offset = func - (image + addrs[i]);
 				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
 				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
 				break;
 			case BPF_S_LD_W_IND:
-				func = sk_load_word_ind;
+				func = sk_load_word;
 common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
 				t_offset = func - (image + addrs[i]);
-				EMIT1_off32(0xbe, K);	/* mov imm32,%esi   */
+				if (K) {
+					if (is_imm8(K)) {
+						EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
+					} else {
+						EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
+						EMIT(K, 4);
+					}
+				} else {
+					EMIT2(0x89,0xde); /* mov %ebx,%esi */
+				}
 				EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
 				break;
 			case BPF_S_LD_H_IND:
-				func = sk_load_half_ind;
+				func = sk_load_half;
 				goto common_load_ind;
 			case BPF_S_LD_B_IND:
-				func = sk_load_byte_ind;
+				func = sk_load_byte;
 				goto common_load_ind;
 			case BPF_S_JMP_JA:
 				t_offset = addrs[i + K] - addrs[i];
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 4793683..218cdb1 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -225,13 +225,13 @@
 	fix_processor_context();
 
 	do_fpu_end();
+	x86_platform.restore_sched_clock_state();
 	mtrr_bp_restore();
 }
 
 /* Needed by apm.c */
 void restore_processor_state(void)
 {
-	x86_platform.restore_sched_clock_state();
 	__restore_processor_state(&saved_context);
 }
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 0000000..7d01b8c
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb()	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb()	asm volatile("mfence" : : : "memory")
+#define rmb()	asm volatile("lfence" : : : "memory")
+#define wmb()	asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb()	rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb()	barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb()	wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb()	barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
deleted file mode 100644
index a459fd9..0000000
--- a/arch/x86/um/asm/system.h
+++ /dev/null
@@ -1,135 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() 	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb()	rmb()
-#else
-# define smp_rmb()	barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() 	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
-#endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b132ade..4f51beb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -967,7 +967,7 @@
 	xen_setup_mfn_list_list();
 }
 
-/* This is called once we have the cpu_possible_map */
+/* This is called once we have the cpu_possible_mask */
 void xen_setup_vcpu_info_placement(void)
 {
 	int cpu;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 988828b..b8e2794 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1859,6 +1859,7 @@
 #endif	/* CONFIG_X86_64 */
 
 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss;
 
 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 {
@@ -1899,7 +1900,7 @@
 		 * We just don't map the IO APIC - all access is via
 		 * hypercalls.  Keep the address in the pte for reference.
 		 */
-		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+		pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL);
 		break;
 #endif
 
@@ -2064,6 +2065,7 @@
 	pv_mmu_ops = xen_mmu_ops;
 
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
+	memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE);
 }
 
 /* Protected by xen_reservation_lock. */
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index b480d42..967633a 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -12,8 +12,8 @@
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
 	.mapping_error = xen_swiotlb_dma_mapping_error,
-	.alloc_coherent = xen_swiotlb_alloc_coherent,
-	.free_coherent = xen_swiotlb_free_coherent,
+	.alloc = xen_swiotlb_alloc_coherent,
+	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 02900e8..5fac691 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -59,7 +59,7 @@
 
 static void __cpuinit cpu_bringup(void)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 
 	cpu_init();
 	touch_softlockup_watchdog();
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 21ff9d0..8e84225 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -627,7 +627,7 @@
 
 config CRYPTO_BLOWFISH_X86_64
 	tristate "Blowfish cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLOWFISH_COMMON
 	help
@@ -657,7 +657,7 @@
 
 config CRYPTO_CAMELLIA_X86_64
 	tristate "Camellia cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	depends on CRYPTO
 	select CRYPTO_ALGAPI
 	select CRYPTO_LRW
@@ -893,7 +893,7 @@
 
 config CRYPTO_TWOFISH_X86_64_3WAY
 	tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_TWOFISH_COMMON
 	select CRYPTO_TWOFISH_X86_64
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a0f768c..8d3a056 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -613,8 +613,7 @@
 	return err;
 }
 
-static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
-						 u32 mask)
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
 {
 	struct crypto_alg *alg;
 
@@ -652,6 +651,7 @@
 
 	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
 
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
 			 u32 type, u32 mask)
diff --git a/crypto/aead.c b/crypto/aead.c
index 04add3dc..e4cb351 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -470,8 +470,7 @@
 	return err;
 }
 
-static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
-					     u32 mask)
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
 {
 	struct crypto_alg *alg;
 
@@ -503,6 +502,7 @@
 
 	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_aead);
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
 		     u32 type, u32 mask)
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f76e42b..f1ea0a0 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -21,9 +21,13 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/cryptouser.h>
+#include <linux/sched.h>
 #include <net/netlink.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
 #include "internal.h"
 
 DEFINE_MUTEX(crypto_cfg_mutex);
@@ -301,6 +305,60 @@
 	return crypto_unregister_instance(alg);
 }
 
+static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
+						   u32 mask)
+{
+	int err;
+	struct crypto_alg *alg;
+
+	type = crypto_skcipher_type(type);
+	mask = crypto_skcipher_mask(mask);
+
+	for (;;) {
+		alg = crypto_lookup_skcipher(name,  type, mask);
+		if (!IS_ERR(alg))
+			return alg;
+
+		err = PTR_ERR(alg);
+		if (err != -EAGAIN)
+			break;
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
+	}
+
+	return ERR_PTR(err);
+}
+
+static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+					       u32 mask)
+{
+	int err;
+	struct crypto_alg *alg;
+
+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	type |= CRYPTO_ALG_TYPE_AEAD;
+	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	mask |= CRYPTO_ALG_TYPE_MASK;
+
+	for (;;) {
+		alg = crypto_lookup_aead(name,  type, mask);
+		if (!IS_ERR(alg))
+			return alg;
+
+		err = PTR_ERR(alg);
+		if (err != -EAGAIN)
+			break;
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
+	}
+
+	return ERR_PTR(err);
+}
+
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  struct nlattr **attrs)
 {
@@ -325,7 +383,19 @@
 	else
 		name = p->cru_name;
 
-	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
+		break;
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+	case CRYPTO_ALG_TYPE_BLKCIPHER:
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
+		break;
+	default:
+		alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+	}
+
 	if (IS_ERR(alg))
 		return PTR_ERR(alg);
 
@@ -387,12 +457,20 @@
 
 	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
 	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
+		struct crypto_alg *alg;
+		u16 dump_alloc = 0;
+
 		if (link->dump == NULL)
 			return -EINVAL;
+
+		list_for_each_entry(alg, &crypto_alg_list, cra_list)
+			dump_alloc += CRYPTO_REPORT_MAXSIZE;
+
 		{
 			struct netlink_dump_control c = {
 				.dump = link->dump,
 				.done = link->done,
+				.min_dump_alloc = dump_alloc,
 			};
 			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
 		}
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 29a89da..b2c99dc 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -280,11 +280,11 @@
 
 	ictx->tfm_count++;
 
-	cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+	cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
 
-	ctx->cb_cpu = cpumask_first(cpu_active_mask);
+	ctx->cb_cpu = cpumask_first(cpu_online_mask);
 	for (cpu = 0; cpu < cpu_index; cpu++)
-		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
 
 	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
 
@@ -472,7 +472,7 @@
 		goto err_free_padata;
 	}
 
-	cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
 	rcu_assign_pointer(pcrypt->cb_cpumask, mask);
 
 	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 6f0459c..d236aef 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -50,6 +50,8 @@
 
 source "drivers/spi/Kconfig"
 
+source "drivers/hsi/Kconfig"
+
 source "drivers/pps/Kconfig"
 
 source "drivers/ptp/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 262b19d..95952c8 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -53,6 +53,7 @@
 obj-$(CONFIG_TARGET_CORE)	+= target/
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
+obj-y				+= hsi/
 obj-y				+= net/
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_FUSION)		+= message/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7556913..47768ff 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -384,6 +384,15 @@
 	  load additional kernel modules after boot, this feature may be used
 	  to override that restriction).
 
+config ACPI_BGRT
+        tristate "Boottime Graphics Resource Table support"
+        default n
+        help
+	  This driver adds support for exposing the ACPI Boottime Graphics
+	  Resource Table, which allows the operating system to obtain
+	  data from the firmware boot splash. It will appear under
+	  /sys/firmware/acpi/bgrt/.
+
 source "drivers/acpi/apei/Kconfig"
 
 endif	# ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 1567028..47199e2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -62,6 +62,7 @@
 obj-$(CONFIG_ACPI_HED)		+= hed.o
 obj-$(CONFIG_ACPI_EC_DEBUGFS)	+= ec_sys.o
 obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
+obj-$(CONFIG_ACPI_BGRT)		+= bgrt.o
 
 # processor has its own "processor." module_param namespace
 processor-y			:= processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 0ca208b..793b8cc 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -68,12 +68,14 @@
 
 acpi-y +=		\
 	hwacpi.o	\
+	hwesleep.o	\
 	hwgpe.o		\
 	hwpci.o		\
 	hwregs.o	\
 	hwsleep.o	\
 	hwvalid.o	\
-	hwxface.o
+	hwxface.o	\
+	hwxfsleep.o
 
 acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
 
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index a44bd42..8a7d51b 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -51,7 +51,6 @@
  *
  * Note: The order of these include files is important.
  */
-#include "acconfig.h"		/* Global configuration constants */
 #include "acmacros.h"		/* C macros */
 #include "aclocal.h"		/* Internal data types */
 #include "acobject.h"		/* ACPI internal object */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index deaa819..5e8abb0 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -111,7 +111,7 @@
 
 void acpi_db_set_scope(char *name);
 
-acpi_status acpi_db_sleep(char *object_arg);
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg))
 
 void acpi_db_find_references(char *object_arg);
 
@@ -119,11 +119,13 @@
 
 void acpi_db_display_resources(char *object_arg);
 
-void acpi_db_display_gpes(void);
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
 
 void acpi_db_check_integrity(void);
 
-void acpi_db_generate_gpe(char *gpe_arg, char *block_arg);
+ACPI_HW_DEPENDENT_RETURN_VOID(void
+			      acpi_db_generate_gpe(char *gpe_arg,
+						   char *block_arg))
 
 void acpi_db_check_predefined_names(void);
 
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c53caa5..d700f63 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -69,11 +69,10 @@
  */
 acpi_status acpi_ev_init_global_lock_handler(void);
 
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_remove_global_lock_handler(void);
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
+			    acpi_ev_acquire_global_lock(u16 timeout))
+ ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+ acpi_status acpi_ev_remove_global_lock_handler(void);
 
 /*
  * evgpe - Low-level GPE support
@@ -114,7 +113,9 @@
 			     struct acpi_gpe_block_info *gpe_block,
 			     void *context);
 
-acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
+			    acpi_ev_delete_gpe_block(struct acpi_gpe_block_info
+						     *gpe_block))
 
 u32
 acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
@@ -126,9 +127,10 @@
  */
 acpi_status acpi_ev_gpe_initialize(void);
 
-void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
+ACPI_HW_DEPENDENT_RETURN_VOID(void
+			      acpi_ev_update_gpes(acpi_owner_id table_owner_id))
 
-acpi_status
+ acpi_status
 acpi_ev_match_gpe_method(acpi_handle obj_handle,
 			 u32 level, void *context, void **return_value);
 
@@ -237,6 +239,5 @@
 
 u32 acpi_ev_initialize_sCI(u32 program_sCI);
 
-void acpi_ev_terminate(void);
-
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
 #endif				/* __ACEVENTS_H__  */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 2853f76..4f7d3f5 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -147,7 +147,7 @@
  */
 u8 acpi_gbl_reduced_hardware;
 
-#endif
+#endif				/* DEFINE_ACPI_GLOBALS */
 
 /* Do not disassemble buffers to resource descriptors */
 
@@ -184,8 +184,12 @@
  * found in the RSDT/XSDT.
  */
 ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
+
+#if (!ACPI_REDUCED_HARDWARE)
 ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
 
+#endif				/* !ACPI_REDUCED_HARDWARE */
+
 /* These addresses are calculated from the FADT Event Block addresses */
 
 ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
@@ -397,10 +401,15 @@
 ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
 ACPI_EXTERN struct acpi_gpe_block_info
 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+
+#if (!ACPI_REDUCED_HARDWARE)
+
 ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
 ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
 ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
 
+#endif				/* !ACPI_REDUCED_HARDWARE */
+
 /*****************************************************************************
  *
  * Debugger globals
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 677793e..5ccb99a 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -81,6 +81,26 @@
 acpi_status acpi_hw_clear_acpi_status(void);
 
 /*
+ * hwsleep - sleep/wake support (Legacy sleep registers)
+ */
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
+
+/*
+ * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
+ */
+void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
+
+acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
+
+acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
+
+/*
  * hwvalid - Port I/O with validation
  */
 acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width);
@@ -128,16 +148,4 @@
 acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
 		      acpi_handle root_pci_device, acpi_handle pci_region);
 
-#ifdef	ACPI_FUTURE_USAGE
-/*
- * hwtimer - ACPI Timer prototypes
- */
-acpi_status acpi_get_timer_resolution(u32 * resolution);
-
-acpi_status acpi_get_timer(u32 * ticks);
-
-acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed);
-#endif				/* ACPI_FUTURE_USAGE */
-
 #endif				/* __ACHWARE_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 3f24068..e3922ca 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -370,6 +370,7 @@
 /* Defines for Flags field above */
 
 #define ACPI_OBJECT_REPAIRED    1
+#define ACPI_OBJECT_WRAPPED     2
 
 /*
  * Bitmapped return value types
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index ef338a9..f119f47 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -516,6 +516,12 @@
 
 #endif				/* ACPI_DEBUG_OUTPUT */
 
+#if (!ACPI_REDUCED_HARDWARE)
+#define ACPI_HW_OPTIONAL_FUNCTION(addr)     addr
+#else
+#define ACPI_HW_OPTIONAL_FUNCTION(addr)     NULL
+#endif
+
 /*
  * Some code only gets executed when the debugger is built in.
  * Note that this is entirely independent of whether the
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 2c9e0f0..9b19d4b 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -283,8 +283,9 @@
 		      union acpi_operand_object **return_object_ptr);
 
 acpi_status
-acpi_ns_repair_package_list(struct acpi_predefined_data *data,
-			    union acpi_operand_object **obj_desc_ptr);
+acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
+			  union acpi_operand_object *original_object,
+			  union acpi_operand_object **obj_desc_ptr);
 
 acpi_status
 acpi_ns_repair_null_element(struct acpi_predefined_data *data,
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index d5bec30..6712965 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -67,6 +67,11 @@
 
 acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
 
+struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
+						 *table_header,
+						 struct acpi_table_desc
+						 *table_desc);
+
 acpi_status
 acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index);
 
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 6729ebe..07e4dc4 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -47,7 +47,7 @@
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evevent")
-
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /* Local prototypes */
 static acpi_status acpi_ev_fixed_event_initialize(void);
 
@@ -291,3 +291,5 @@
 	return ((acpi_gbl_fixed_event_handlers[event].
 		 handler) (acpi_gbl_fixed_event_handlers[event].context));
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 5e5683c..cfeab38 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -48,7 +48,7 @@
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evglock")
-
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /* Local prototypes */
 static u32 acpi_ev_global_lock_handler(void *context);
 
@@ -339,3 +339,5 @@
 	acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
 	return_ACPI_STATUS(status);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 9e88cb6..8ba0e5f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -48,7 +48,7 @@
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpe")
-
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /* Local prototypes */
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
 
@@ -766,3 +766,5 @@
 
 	return_UINT32(ACPI_INTERRUPT_HANDLED);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index be75339..23a3ca8 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -48,7 +48,7 @@
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpeblk")
-
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /* Local prototypes */
 static acpi_status
 acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
@@ -504,3 +504,5 @@
 
 	return_ACPI_STATUS(AE_OK);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index adf7494..da0add8 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -48,7 +48,7 @@
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpeinit")
-
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /*
  * Note: History of _PRW support in ACPICA
  *
@@ -440,3 +440,5 @@
 			  name, gpe_number));
 	return_ACPI_STATUS(AE_OK);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 2507393..3c43796 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -48,6 +48,7 @@
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpeutil")
 
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_walk_gpe_list
@@ -374,3 +375,5 @@
 
 	return_ACPI_STATUS(AE_OK);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 84966f4..51ef9f5 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -108,27 +108,30 @@
 	ACPI_FUNCTION_NAME(ev_queue_notify_request);
 
 	/*
-	 * For value 3 (Ejection Request), some device method may need to be run.
-	 * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need
-	 *   to be run.
+	 * For value 0x03 (Ejection Request), may need to run a device method.
+	 * For value 0x02 (Device Wake), if _PRW exists, may need to run
+	 *   the _PS0 method.
 	 * For value 0x80 (Status Change) on the power button or sleep button,
-	 *   initiate soft-off or sleep operation?
+	 *   initiate soft-off or sleep operation.
+	 *
+	 * For all cases, simply dispatch the notify to the handler.
 	 */
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
-			  acpi_ut_get_node_name(node), node, notify_value,
-			  acpi_ut_get_notify_name(notify_value)));
+			  "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
+			  acpi_ut_get_node_name(node),
+			  acpi_ut_get_type_name(node->type), notify_value,
+			  acpi_ut_get_notify_name(notify_value), node));
 
 	/* Get the notify object attached to the NS Node */
 
 	obj_desc = acpi_ns_get_attached_object(node);
 	if (obj_desc) {
 
-		/* We have the notify object, Get the right handler */
+		/* We have the notify object; get the correct handler */
 
 		switch (node->type) {
 
-			/* Notify allowed only on these types */
+			/* Notify is allowed only on these types */
 
 		case ACPI_TYPE_DEVICE:
 		case ACPI_TYPE_THERMAL:
@@ -152,7 +155,7 @@
 	}
 
 	/*
-	 * If there is any handler to run, schedule the dispatcher.
+	 * If there is a handler to run, schedule the dispatcher.
 	 * Check for:
 	 * 1) Global system notify handler
 	 * 2) Global device notify handler
@@ -270,6 +273,7 @@
 	acpi_ut_delete_generic_state(notify_info);
 }
 
+#if (!ACPI_REDUCED_HARDWARE)
 /******************************************************************************
  *
  * FUNCTION:    acpi_ev_terminate
@@ -338,3 +342,5 @@
 	}
 	return_VOID;
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 26065c6..6a57aa2 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -48,7 +48,7 @@
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evsci")
-
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /* Local prototypes */
 static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
 
@@ -181,3 +181,5 @@
 
 	return_ACPI_STATUS(status);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 61944e8..44bef57 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -51,222 +51,6 @@
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxface")
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_exception_handler
- *
- * PARAMETERS:  Handler         - Pointer to the handler function for the
- *                                event
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Saves the pointer to the handler function
- *
- ******************************************************************************/
-#ifdef ACPI_FUTURE_USAGE
-acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Don't allow two handlers. */
-
-	if (acpi_gbl_exception_handler) {
-		status = AE_ALREADY_EXISTS;
-		goto cleanup;
-	}
-
-	/* Install the handler */
-
-	acpi_gbl_exception_handler = handler;
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
-#endif				/*  ACPI_FUTURE_USAGE  */
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_global_event_handler
- *
- * PARAMETERS:  Handler         - Pointer to the global event handler function
- *              Context         - Value passed to the handler on each event
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Saves the pointer to the handler function. The global handler
- *              is invoked upon each incoming GPE and Fixed Event. It is
- *              invoked at interrupt level at the time of the event dispatch.
- *              Can be used to update event counters, etc.
- *
- ******************************************************************************/
-acpi_status
-acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
-
-	/* Parameter validation */
-
-	if (!handler) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Don't allow two handlers. */
-
-	if (acpi_gbl_global_event_handler) {
-		status = AE_ALREADY_EXISTS;
-		goto cleanup;
-	}
-
-	acpi_gbl_global_event_handler = handler;
-	acpi_gbl_global_event_handler_context = context;
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_fixed_event_handler
- *
- * PARAMETERS:  Event           - Event type to enable.
- *              Handler         - Pointer to the handler function for the
- *                                event
- *              Context         - Value passed to the handler on each GPE
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Saves the pointer to the handler function and then enables the
- *              event.
- *
- ******************************************************************************/
-acpi_status
-acpi_install_fixed_event_handler(u32 event,
-				 acpi_event_handler handler, void *context)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
-
-	/* Parameter validation */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Don't allow two handlers. */
-
-	if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
-		status = AE_ALREADY_EXISTS;
-		goto cleanup;
-	}
-
-	/* Install the handler before enabling the event */
-
-	acpi_gbl_fixed_event_handlers[event].handler = handler;
-	acpi_gbl_fixed_event_handlers[event].context = context;
-
-	status = acpi_clear_event(event);
-	if (ACPI_SUCCESS(status))
-		status = acpi_enable_event(event, 0);
-	if (ACPI_FAILURE(status)) {
-		ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
-			      event));
-
-		/* Remove the handler */
-
-		acpi_gbl_fixed_event_handlers[event].handler = NULL;
-		acpi_gbl_fixed_event_handlers[event].context = NULL;
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Enabled fixed event %X, Handler=%p\n", event,
-				  handler));
-	}
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_fixed_event_handler
- *
- * PARAMETERS:  Event           - Event type to disable.
- *              Handler         - Address of the handler
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disables the event and unregisters the event handler.
- *
- ******************************************************************************/
-acpi_status
-acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
-
-	/* Parameter validation */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Disable the event before removing the handler */
-
-	status = acpi_disable_event(event, 0);
-
-	/* Always Remove the handler */
-
-	acpi_gbl_fixed_event_handlers[event].handler = NULL;
-	acpi_gbl_fixed_event_handlers[event].context = NULL;
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_WARNING((AE_INFO,
-			      "Could not write to fixed event enable register 0x%X",
-			      event));
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
-				  event));
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
 
 /*******************************************************************************
  *
@@ -334,6 +118,7 @@
 	return AE_OK;
 }
 
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_notify_handler
@@ -705,6 +490,224 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_install_exception_handler
+ *
+ * PARAMETERS:  Handler         - Pointer to the handler function for the
+ *                                event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function
+ *
+ ******************************************************************************/
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow two handlers. */
+
+	if (acpi_gbl_exception_handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	/* Install the handler */
+
+	acpi_gbl_exception_handler = handler;
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
+#endif				/*  ACPI_FUTURE_USAGE  */
+
+#if (!ACPI_REDUCED_HARDWARE)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_global_event_handler
+ *
+ * PARAMETERS:  Handler         - Pointer to the global event handler function
+ *              Context         - Value passed to the handler on each event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function. The global handler
+ *              is invoked upon each incoming GPE and Fixed Event. It is
+ *              invoked at interrupt level at the time of the event dispatch.
+ *              Can be used to update event counters, etc.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
+
+	/* Parameter validation */
+
+	if (!handler) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow two handlers. */
+
+	if (acpi_gbl_global_event_handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	acpi_gbl_global_event_handler = handler;
+	acpi_gbl_global_event_handler_context = context;
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_fixed_event_handler
+ *
+ * PARAMETERS:  Event           - Event type to enable.
+ *              Handler         - Pointer to the handler function for the
+ *                                event
+ *              Context         - Value passed to the handler on each GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function and then enables the
+ *              event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_fixed_event_handler(u32 event,
+				 acpi_event_handler handler, void *context)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
+
+	/* Parameter validation */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow two handlers. */
+
+	if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	/* Install the handler before enabling the event */
+
+	acpi_gbl_fixed_event_handlers[event].handler = handler;
+	acpi_gbl_fixed_event_handlers[event].context = context;
+
+	status = acpi_clear_event(event);
+	if (ACPI_SUCCESS(status))
+		status = acpi_enable_event(event, 0);
+	if (ACPI_FAILURE(status)) {
+		ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
+			      event));
+
+		/* Remove the handler */
+
+		acpi_gbl_fixed_event_handlers[event].handler = NULL;
+		acpi_gbl_fixed_event_handlers[event].context = NULL;
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Enabled fixed event %X, Handler=%p\n", event,
+				  handler));
+	}
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_fixed_event_handler
+ *
+ * PARAMETERS:  Event           - Event type to disable.
+ *              Handler         - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disables the event and unregisters the event handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
+
+	/* Parameter validation */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Disable the event before removing the handler */
+
+	status = acpi_disable_event(event, 0);
+
+	/* Always Remove the handler */
+
+	acpi_gbl_fixed_event_handlers[event].handler = NULL;
+	acpi_gbl_fixed_event_handlers[event].context = NULL;
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_WARNING((AE_INFO,
+			      "Could not write to fixed event enable register 0x%X",
+			      event));
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
+				  event));
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_install_gpe_handler
  *
  * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
@@ -984,3 +987,4 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
+#endif				/* !ACPI_REDUCED_HARDWARE */
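The evxface.c change above is essentially a code move: the fixed-event and global-event interfaces now sit inside the !ACPI_REDUCED_HARDWARE region, so they disappear from reduced-hardware builds. For reference, a minimal consumer of the moved API looks like the sketch below; my_power_button_handler and register_power_button_handler are hypothetical names, everything else is the existing interface:

	/* Hypothetical fixed-event handler; these run from SCI dispatch and return a u32 status */
	static u32 my_power_button_handler(void *context)
	{
		return ACPI_INTERRUPT_HANDLED;
	}

	static acpi_status register_power_button_handler(void)
	{
		return acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
							my_power_button_handler,
							NULL);
	}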
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 1768bbe..77cee5a 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -49,6 +49,7 @@
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfevnt")
 
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /*******************************************************************************
  *
  * FUNCTION:    acpi_enable
@@ -352,3 +353,4 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_event_status)
+#endif				/* !ACPI_REDUCED_HARDWARE */
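Likewise, wrapping evxfevnt.c removes acpi_enable(), acpi_enable_event(), acpi_get_event_status() and friends from reduced-hardware builds. A hedged usage sketch of one of the excluded interfaces (ACPI_EVENT_FLAG_SET comes from the public headers, which are not part of this hunk; error handling is abbreviated):

	acpi_event_status evt_status;
	acpi_status status;

	/* Query the enable/status bits of the power-button fixed event */
	status = acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &evt_status);
	if (ACPI_SUCCESS(status) && (evt_status & ACPI_EVENT_FLAG_SET)) {
		/* The event's status bit is currently set */
	}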
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 33388fd..86f9b34 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -50,6 +50,7 @@
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfgpe")
 
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /******************************************************************************
  *
  * FUNCTION:    acpi_update_all_gpes
@@ -695,3 +696,4 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index d21ec5f..d0b9ed5 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -48,6 +48,7 @@
 #define _COMPONENT          ACPI_HARDWARE
 ACPI_MODULE_NAME("hwacpi")
 
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /******************************************************************************
  *
  * FUNCTION:    acpi_hw_set_mode
@@ -166,3 +167,5 @@
 		return_UINT32(ACPI_SYS_MODE_LEGACY);
 	}
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
new file mode 100644
index 0000000..29e8592
--- /dev/null
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -0,0 +1,247 @@
+/******************************************************************************
+ *
+ * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
+ *                    extended FADT-V5 sleep registers.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwesleep")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_execute_sleep_method
+ *
+ * PARAMETERS:  method_pathname     - Pathname of method to execute
+ *              integer_argument    - Argument to pass to the method
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Execute a sleep/wake related method with one integer argument
+ *              and no return value.
+ *
+ ******************************************************************************/
+void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
+{
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(hw_execute_sleep_method);
+
+	/* One argument, integer_argument; No return value expected */
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+	arg.type = ACPI_TYPE_INTEGER;
+	arg.integer.value = (u64)integer_argument;
+
+	status = acpi_evaluate_object(NULL, method_pathname, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		ACPI_EXCEPTION((AE_INFO, status, "While executing method %s",
+				method_pathname));
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_extended_sleep
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state to enter
+ *              Flags               - ACPI_EXECUTE_GTS to run optional method
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enter a system sleep state via the extended FADT sleep
+ *              registers (V5 FADT).
+ *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
+{
+	acpi_status status;
+	u8 sleep_type_value;
+	u64 sleep_status;
+
+	ACPI_FUNCTION_TRACE(hw_extended_sleep);
+
+	/* Extended sleep registers must be valid */
+
+	if (!acpi_gbl_FADT.sleep_control.address ||
+	    !acpi_gbl_FADT.sleep_status.address) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Clear wake status (WAK_STS) */
+
+	status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	acpi_gbl_system_awake_and_running = FALSE;
+
+	/* Optionally execute _GTS (Going To Sleep) */
+
+	if (flags & ACPI_EXECUTE_GTS) {
+		acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
+	}
+
+	/* Flush caches, as per ACPI specification */
+
+	ACPI_FLUSH_CPU_CACHE();
+
+	/*
+	 * Set the SLP_TYP and SLP_EN bits.
+	 *
+	 * Note: We only use the first value returned by the \_Sx method
+	 * (acpi_gbl_sleep_type_a) - As per ACPI specification.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+			  "Entering sleep state [S%u]\n", sleep_state));
+
+	sleep_type_value =
+	    ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
+	     ACPI_X_SLEEP_TYPE_MASK);
+
+	status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+			    &acpi_gbl_FADT.sleep_control);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Wait for transition back to Working State */
+
+	do {
+		status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+	} while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS));
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_extended_wake_prep
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state we just exited
+ *              Flags               - ACPI_EXECUTE_BFS to run optional method
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform the first part of OS-independent ACPI cleanup after
+ *              a sleep. Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
+{
+	acpi_status status;
+	u8 sleep_type_value;
+
+	ACPI_FUNCTION_TRACE(hw_extended_wake_prep);
+
+	status = acpi_get_sleep_type_data(ACPI_STATE_S0,
+					  &acpi_gbl_sleep_type_a,
+					  &acpi_gbl_sleep_type_b);
+	if (ACPI_SUCCESS(status)) {
+		sleep_type_value =
+		    ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
+		     ACPI_X_SLEEP_TYPE_MASK);
+
+		(void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+				 &acpi_gbl_FADT.sleep_control);
+	}
+
+	/* Optionally execute _BFS (Back From Sleep) */
+
+	if (flags & ACPI_EXECUTE_BFS) {
+		acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
+	}
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_extended_wake
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state we just exited
+ *              Flags               - Reserved, set to zero
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
+ *              Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
+{
+	ACPI_FUNCTION_TRACE(hw_extended_wake);
+
+	/* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
+
+	acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
+
+	/* Execute the wake methods */
+
+	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
+	acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);
+
+	/*
+	 * Some BIOS code assumes that WAK_STS will be cleared on resume
+	 * and uses it to determine whether the system is rebooting or
+	 * resuming. Clear WAK_STS for compatibility.
+	 */
+	(void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+	acpi_gbl_system_awake_and_running = TRUE;
+
+	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 1a6894a..25bd28c 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -48,7 +48,7 @@
 
 #define _COMPONENT          ACPI_HARDWARE
 ACPI_MODULE_NAME("hwgpe")
-
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /* Local prototypes */
 static acpi_status
 acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
@@ -479,3 +479,5 @@
 	status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
 	return_ACPI_STATUS(status);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 4ea4eeb5..6b6c83b 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -51,6 +51,7 @@
 #define _COMPONENT          ACPI_HARDWARE
 ACPI_MODULE_NAME("hwregs")
 
+#if (!ACPI_REDUCED_HARDWARE)
 /* Local Prototypes */
 static acpi_status
 acpi_hw_read_multiple(u32 *value,
@@ -62,6 +63,8 @@
 		       struct acpi_generic_address *register_a,
 		       struct acpi_generic_address *register_b);
 
+#endif				/* !ACPI_REDUCED_HARDWARE */
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_hw_validate_register
@@ -154,6 +157,7 @@
 acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
 {
 	u64 address;
+	u64 value64;
 	acpi_status status;
 
 	ACPI_FUNCTION_NAME(hw_read);
@@ -175,7 +179,9 @@
 	 */
 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 		status = acpi_os_read_memory((acpi_physical_address)
-					     address, value, reg->bit_width);
+					     address, &value64, reg->bit_width);
+
+		*value = (u32)value64;
 	} else {		/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
 		status = acpi_hw_read_port((acpi_io_address)
@@ -225,7 +231,8 @@
 	 */
 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 		status = acpi_os_write_memory((acpi_physical_address)
-					      address, value, reg->bit_width);
+					      address, (u64)value,
+					      reg->bit_width);
 	} else {		/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
 		status = acpi_hw_write_port((acpi_io_address)
@@ -240,6 +247,7 @@
 	return (status);
 }
 
+#if (!ACPI_REDUCED_HARDWARE)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_hw_clear_acpi_status
@@ -285,7 +293,7 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_hw_get_register_bit_mask
+ * FUNCTION:    acpi_hw_get_bit_register_info
  *
  * PARAMETERS:  register_id         - Index of ACPI Register to access
  *
@@ -658,3 +666,5 @@
 
 	return (status);
 }
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
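Note on the hwregs.c hunk above: acpi_os_read_memory()/acpi_os_write_memory() are assumed here to take 64-bit value arguments sized by reg->bit_width (the corresponding OSL prototype change is outside this section), so the 32-bit acpi_hw_read() now reads into a local u64 and truncates. In isolation, the call pattern is:

	u64 value64;
	acpi_status status;

	/* Read up to reg->bit_width bits, then narrow to the caller's 32-bit out-parameter */
	status = acpi_os_read_memory((acpi_physical_address)address,
				     &value64, reg->bit_width);
	if (ACPI_SUCCESS(status)) {
		*value = (u32)value64;
	}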
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3c4a922..0ed85ca 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -1,7 +1,7 @@
-
 /******************************************************************************
  *
- * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface
+ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
+ *                   original/legacy sleep/PM registers.
  *
  *****************************************************************************/
 
@@ -43,213 +43,37 @@
  */
 
 #include <acpi/acpi.h>
+#include <linux/acpi.h>
 #include "accommon.h"
-#include "actables.h"
-#include <linux/tboot.h>
 #include <linux/module.h>
 
 #define _COMPONENT          ACPI_HARDWARE
 ACPI_MODULE_NAME("hwsleep")
 
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /*******************************************************************************
  *
- * FUNCTION:    acpi_set_firmware_waking_vector
- *
- * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
- *                                    entry point.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
- *
- ******************************************************************************/
-acpi_status
-acpi_set_firmware_waking_vector(u32 physical_address)
-{
-	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
-
-
-	/*
-	 * According to the ACPI specification 2.0c and later, the 64-bit
-	 * waking vector should be cleared and the 32-bit waking vector should
-	 * be used, unless we want the wake-up code to be called by the BIOS in
-	 * Protected Mode.  Some systems (for example HP dv5-1004nr) are known
-	 * to fail to resume if the 64-bit vector is used.
-	 */
-
-	/* Set the 32-bit vector */
-
-	acpi_gbl_FACS->firmware_waking_vector = physical_address;
-
-	/* Clear the 64-bit vector if it exists */
-
-	if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
-		acpi_gbl_FACS->xfirmware_waking_vector = 0;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
-
-#if ACPI_MACHINE_WIDTH == 64
-/*******************************************************************************
- *
- * FUNCTION:    acpi_set_firmware_waking_vector64
- *
- * PARAMETERS:  physical_address    - 64-bit physical address of ACPI protected
- *                                    mode entry point.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
- *              it exists in the table. This function is intended for use with
- *              64-bit host operating systems.
- *
- ******************************************************************************/
-acpi_status
-acpi_set_firmware_waking_vector64(u64 physical_address)
-{
-	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
-
-
-	/* Determine if the 64-bit vector actually exists */
-
-	if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/* Clear 32-bit vector, set the 64-bit X_ vector */
-
-	acpi_gbl_FACS->firmware_waking_vector = 0;
-	acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enter_sleep_state_prep
+ * FUNCTION:    acpi_hw_legacy_sleep
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
+ *              Flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231)
- *              This function must execute with interrupts enabled.
- *              We break sleeping into 2 stages so that OSPM can handle
- *              various OS-specific tasks between the two steps.
- *
- ******************************************************************************/
-acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
-{
-	acpi_status status;
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
-
-	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
-
-	/* _PSW methods could be run here to enable wake-on keyboard, LAN, etc. */
-
-	status = acpi_get_sleep_type_data(sleep_state,
-					  &acpi_gbl_sleep_type_a,
-					  &acpi_gbl_sleep_type_b);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Setup parameter object */
-
-	arg_list.count = 1;
-	arg_list.pointer = &arg;
-
-	arg.type = ACPI_TYPE_INTEGER;
-	arg.integer.value = sleep_state;
-
-	/* Run the _PTS method */
-
-	status = acpi_evaluate_object(NULL, METHOD_NAME__PTS, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Setup the argument to _SST */
-
-	switch (sleep_state) {
-	case ACPI_STATE_S0:
-		arg.integer.value = ACPI_SST_WORKING;
-		break;
-
-	case ACPI_STATE_S1:
-	case ACPI_STATE_S2:
-	case ACPI_STATE_S3:
-		arg.integer.value = ACPI_SST_SLEEPING;
-		break;
-
-	case ACPI_STATE_S4:
-		arg.integer.value = ACPI_SST_SLEEP_CONTEXT;
-		break;
-
-	default:
-		arg.integer.value = ACPI_SST_INDICATOR_OFF;	/* Default is off */
-		break;
-	}
-
-	/*
-	 * Set the system indicators to show the desired sleep state.
-	 * _SST is an optional method (return no error if not found)
-	 */
-	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"While executing method _SST"));
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
-
-static unsigned int gts, bfs;
-module_param(gts, uint, 0644);
-module_param(bfs, uint, 0644);
-MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
-MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enter_sleep_state
- *
- * PARAMETERS:  sleep_state         - Which sleep state to enter
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ * DESCRIPTION: Enter a system sleep state via the legacy FADT PM registers
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
 {
-	u32 pm1a_control;
-	u32 pm1b_control;
 	struct acpi_bit_register_info *sleep_type_reg_info;
 	struct acpi_bit_register_info *sleep_enable_reg_info;
+	u32 pm1a_control;
+	u32 pm1b_control;
 	u32 in_value;
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
 	acpi_status status;
 
-	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
-
-	if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
-	    (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
-		ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
-			    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
-		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
-	}
+	ACPI_FUNCTION_TRACE(hw_legacy_sleep);
 
 	sleep_type_reg_info =
 	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
@@ -271,6 +95,18 @@
 		return_ACPI_STATUS(status);
 	}
 
+	if (sleep_state != ACPI_STATE_S5) {
+		/*
+		 * Disable BM arbitration. This feature is contained within an
+		 * optional register (PM2 Control), so ignore a BAD_ADDRESS
+		 * exception.
+		 */
+		status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
+		if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
 	/*
 	 * 1) Disable/Clear all GPEs
 	 * 2) Enable all wakeup GPEs
@@ -286,18 +122,10 @@
 		return_ACPI_STATUS(status);
 	}
 
-	if (gts) {
-		/* Execute the _GTS method */
+	/* Optionally execute _GTS (Going To Sleep) */
 
-		arg_list.count = 1;
-		arg_list.pointer = &arg;
-		arg.type = ACPI_TYPE_INTEGER;
-		arg.integer.value = sleep_state;
-
-		status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL);
-		if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-			return_ACPI_STATUS(status);
-		}
+	if (flags & ACPI_EXECUTE_GTS) {
+		acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
 	}
 
 	/* Get current value of PM1A control */
@@ -344,8 +172,12 @@
 
 	ACPI_FLUSH_CPU_CACHE();
 
-	tboot_sleep(sleep_state, pm1a_control, pm1b_control);
-
+	status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
+				       pm1b_control);
+	if (ACPI_SKIP(status))
+		return_ACPI_STATUS(AE_OK);
+	if (ACPI_FAILURE(status))
+		return_ACPI_STATUS(status);
 	/* Write #2: Write both SLP_TYP + SLP_EN */
 
 	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
@@ -375,114 +207,44 @@
 		}
 	}
 
-	/* Wait until we enter sleep state */
+	/* Wait for transition back to Working State */
 
 	do {
-		status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS,
-						    &in_value);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Spin until we wake */
-
-	} while (!in_value);
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enter_sleep_state_s4bios
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform a S4 bios request.
- *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
- *
- ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
-{
-	u32 in_value;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
-
-	/* Clear the wake status bit (PM1) */
-
-	status =
-	    acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_clear_acpi_status();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * 1) Disable/Clear all GPEs
-	 * 2) Enable all wakeup GPEs
-	 */
-	status = acpi_hw_disable_all_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-	acpi_gbl_system_awake_and_running = FALSE;
-
-	status = acpi_hw_enable_all_wakeup_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_FLUSH_CPU_CACHE();
-
-	status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
-				    (u32) acpi_gbl_FADT.S4bios_request, 8);
-
-	do {
-		acpi_os_stall(1000);
 		status =
 		    acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
 		if (ACPI_FAILURE(status)) {
 			return_ACPI_STATUS(status);
 		}
+
 	} while (!in_value);
 
 	return_ACPI_STATUS(AE_OK);
 }
 
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
-
 /*******************************************************************************
  *
- * FUNCTION:    acpi_leave_sleep_state_prep
+ * FUNCTION:    acpi_hw_legacy_wake_prep
  *
- * PARAMETERS:  sleep_state         - Which sleep state we are exiting
+ * PARAMETERS:  sleep_state         - Which sleep state we just exited
+ *              Flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
  * DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
  *              sleep.
- *              Called with interrupts DISABLED.
+ *              Called with interrupts ENABLED.
  *
  ******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
+
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
 {
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
 	acpi_status status;
 	struct acpi_bit_register_info *sleep_type_reg_info;
 	struct acpi_bit_register_info *sleep_enable_reg_info;
 	u32 pm1a_control;
 	u32 pm1b_control;
 
-	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
+	ACPI_FUNCTION_TRACE(hw_legacy_wake_prep);
 
 	/*
 	 * Set SLP_TYPE and SLP_EN to state S0.
@@ -525,27 +287,20 @@
 		}
 	}
 
-	if (bfs) {
-		/* Execute the _BFS method */
+	/* Optionally execute _BFS (Back From Sleep) */
 
-		arg_list.count = 1;
-		arg_list.pointer = &arg;
-		arg.type = ACPI_TYPE_INTEGER;
-		arg.integer.value = sleep_state;
-
-		status = acpi_evaluate_object(NULL, METHOD_NAME__BFS, &arg_list, NULL);
-		if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-			ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
-		}
+	if (flags & ACPI_EXECUTE_BFS) {
+		acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
 	}
 	return_ACPI_STATUS(status);
 }
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_leave_sleep_state
+ * FUNCTION:    acpi_hw_legacy_wake
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
+ *              Flags               - Reserved, set to zero
  *
  * RETURN:      Status
  *
@@ -553,31 +308,17 @@
  *              Called with interrupts ENABLED.
  *
  ******************************************************************************/
-acpi_status acpi_leave_sleep_state(u8 sleep_state)
+
+acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
 {
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
 	acpi_status status;
 
-	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
+	ACPI_FUNCTION_TRACE(hw_legacy_wake);
 
 	/* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
 
 	acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
-
-	/* Setup parameter object */
-
-	arg_list.count = 1;
-	arg_list.pointer = &arg;
-	arg.type = ACPI_TYPE_INTEGER;
-
-	/* Ignore any errors from these methods */
-
-	arg.integer.value = ACPI_SST_WAKING;
-	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
-	}
+	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
 
 	/*
 	 * GPEs must be enabled before _WAK is called as GPEs
@@ -591,46 +332,50 @@
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}
+
 	status = acpi_hw_enable_all_runtime_gpes();
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}
 
-	arg.integer.value = sleep_state;
-	status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
-	}
-	/* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
+	/*
+	 * Now we can execute _WAK, etc. Some machines require that the GPEs
+	 * are enabled before the wake methods are executed.
+	 */
+	acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);
 
 	/*
-	 * Some BIOSes assume that WAK_STS will be cleared on resume and use
-	 * it to determine whether the system is rebooting or resuming. Clear
-	 * it for compatibility.
+	 * Some BIOS code assumes that WAK_STS will be cleared on resume
+	 * and uses it to determine whether the system is rebooting or
+	 * resuming. Clear WAK_STS for compatibility.
 	 */
 	acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1);
-
 	acpi_gbl_system_awake_and_running = TRUE;
 
 	/* Enable power button */
 
 	(void)
 	    acpi_write_bit_register(acpi_gbl_fixed_event_info
-			      [ACPI_EVENT_POWER_BUTTON].
-			      enable_register_id, ACPI_ENABLE_EVENT);
+				    [ACPI_EVENT_POWER_BUTTON].
+				    enable_register_id, ACPI_ENABLE_EVENT);
 
 	(void)
 	    acpi_write_bit_register(acpi_gbl_fixed_event_info
-			      [ACPI_EVENT_POWER_BUTTON].
-			      status_register_id, ACPI_CLEAR_STATUS);
+				    [ACPI_EVENT_POWER_BUTTON].
+				    status_register_id, ACPI_CLEAR_STATUS);
 
-	arg.integer.value = ACPI_SST_WORKING;
-	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
+	/*
+	 * Enable BM arbitration. This feature is contained within an
+	 * optional register (PM2 Control), so ignore a BAD_ADDRESS
+	 * exception.
+	 */
+	status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
+	if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
+		return_ACPI_STATUS(status);
 	}
 
+	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
 	return_ACPI_STATUS(status);
 }
 
-ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
+#endif				/* !ACPI_REDUCED_HARDWARE */
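With the gts/bfs module parameters gone from hwsleep.c, the decision to evaluate _GTS/_BFS moves to the caller via the flags argument. A hedged sketch of what a caller might do (the real wiring goes through the acpi_sleep_dispatch table in hwxfsleep.c; want_gts and the direct call below are assumptions for illustration, while the ACPI_EXECUTE_GTS flag and acpi_hw_legacy_sleep() come from this patch):

	acpi_status status;
	u8 flags = 0;

	if (want_gts)		/* Hypothetical policy decision, e.g. from a module parameter */
		flags |= ACPI_EXECUTE_GTS;

	status = acpi_hw_legacy_sleep(ACPI_STATE_S3, flags);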
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index d4973d9..f1b2c3b 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -49,6 +49,7 @@
 #define _COMPONENT          ACPI_HARDWARE
 ACPI_MODULE_NAME("hwtimer")
 
+#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 /******************************************************************************
  *
  * FUNCTION:    acpi_get_timer_resolution
@@ -187,3 +188,4 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
+#endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 9d38eb6..ab513a9 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -74,8 +74,7 @@
 
 	/* Check if the reset register is supported */
 
-	if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
-	    !reset_reg->address) {
+	if (!reset_reg->address) {
 		return_ACPI_STATUS(AE_NOT_EXIST);
 	}
 
@@ -138,11 +137,6 @@
 		return (status);
 	}
 
-	width = reg->bit_width;
-	if (width == 64) {
-		width = 32;	/* Break into two 32-bit transfers */
-	}
-
 	/* Initialize entire 64-bit return value to zero */
 
 	*return_value = 0;
@@ -154,25 +148,18 @@
 	 */
 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 		status = acpi_os_read_memory((acpi_physical_address)
-					     address, &value, width);
+					     address, return_value,
+					     reg->bit_width);
 		if (ACPI_FAILURE(status)) {
 			return (status);
 		}
-		*return_value = value;
-
-		if (reg->bit_width == 64) {
-
-			/* Read the top 32 bits */
-
-			status = acpi_os_read_memory((acpi_physical_address)
-						     (address + 4), &value, 32);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-			*return_value |= ((u64)value << 32);
-		}
 	} else {		/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
+		width = reg->bit_width;
+		if (width == 64) {
+			width = 32;	/* Break into two 32-bit transfers */
+		}
+
 		status = acpi_hw_read_port((acpi_io_address)
 					   address, &value, width);
 		if (ACPI_FAILURE(status)) {
@@ -231,33 +218,23 @@
 		return (status);
 	}
 
-	width = reg->bit_width;
-	if (width == 64) {
-		width = 32;	/* Break into two 32-bit transfers */
-	}
-
 	/*
 	 * Two address spaces supported: Memory or IO. PCI_Config is
 	 * not supported here because the GAS structure is insufficient
 	 */
 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 		status = acpi_os_write_memory((acpi_physical_address)
-					      address, ACPI_LODWORD(value),
-					      width);
+					      address, value, reg->bit_width);
 		if (ACPI_FAILURE(status)) {
 			return (status);
 		}
-
-		if (reg->bit_width == 64) {
-			status = acpi_os_write_memory((acpi_physical_address)
-						      (address + 4),
-						      ACPI_HIDWORD(value), 32);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-		}
 	} else {		/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
+		width = reg->bit_width;
+		if (width == 64) {
+			width = 32;	/* Break into two 32-bit transfers */
+		}
+
 		status = acpi_hw_write_port((acpi_io_address)
 					    address, ACPI_LODWORD(value),
 					    width);
@@ -286,6 +263,7 @@
 
 ACPI_EXPORT_SYMBOL(acpi_write)
 
+#if (!ACPI_REDUCED_HARDWARE)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_read_bit_register
@@ -453,7 +431,7 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
-
+#endif				/* !ACPI_REDUCED_HARDWARE */
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_sleep_type_data
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
new file mode 100644
index 0000000..762d059
--- /dev/null
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -0,0 +1,431 @@
+/******************************************************************************
+ *
+ * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include <linux/module.h>
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwxfsleep")
+
+/* Local prototypes */
+static acpi_status
+acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
+
+/*
+ * Dispatch table used to efficiently branch to the various sleep
+ * functions.
+ */
+#define ACPI_SLEEP_FUNCTION_ID         0
+#define ACPI_WAKE_PREP_FUNCTION_ID     1
+#define ACPI_WAKE_FUNCTION_ID          2
+
+/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
+
+static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
+	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
+	 acpi_hw_extended_sleep},
+	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
+	 acpi_hw_extended_wake_prep},
+	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
+};
+
+/*
+ * These functions are removed for the ACPI_REDUCED_HARDWARE case:
+ *      acpi_set_firmware_waking_vector
+ *      acpi_set_firmware_waking_vector64
+ *      acpi_enter_sleep_state_s4bios
+ */
+
+#if (!ACPI_REDUCED_HARDWARE)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_firmware_waking_vector
+ *
+ * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
+ *                                    entry point.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
+ *
+ ******************************************************************************/
+
+acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
+{
+	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+
+
+	/*
+	 * According to the ACPI specification 2.0c and later, the 64-bit
+	 * waking vector should be cleared and the 32-bit waking vector should
+	 * be used, unless we want the wake-up code to be called by the BIOS in
+	 * Protected Mode.  Some systems (for example HP dv5-1004nr) are known
+	 * to fail to resume if the 64-bit vector is used.
+	 */
+
+	/* Set the 32-bit vector */
+
+	acpi_gbl_FACS->firmware_waking_vector = physical_address;
+
+	/* Clear the 64-bit vector if it exists */
+
+	if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
+		acpi_gbl_FACS->xfirmware_waking_vector = 0;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+
+#if ACPI_MACHINE_WIDTH == 64
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_firmware_waking_vector64
+ *
+ * PARAMETERS:  physical_address    - 64-bit physical address of ACPI protected
+ *                                    mode entry point.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
+ *              it exists in the table. This function is intended for use with
+ *              64-bit host operating systems.
+ *
+ ******************************************************************************/
+acpi_status acpi_set_firmware_waking_vector64(u64 physical_address)
+{
+	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
+
+	/* Determine if the 64-bit vector actually exists */
+
+	if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Clear 32-bit vector, set the 64-bit X_ vector */
+
+	acpi_gbl_FACS->firmware_waking_vector = 0;
+	acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enter_sleep_state_s4bios
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform an S4 BIOS request.
+ *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+{
+	u32 in_value;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
+
+	/* Clear the wake status bit (PM1) */
+
+	status =
+	    acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_clear_acpi_status();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * 1) Disable/Clear all GPEs
+	 * 2) Enable all wakeup GPEs
+	 */
+	status = acpi_hw_disable_all_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+	acpi_gbl_system_awake_and_running = FALSE;
+
+	status = acpi_hw_enable_all_wakeup_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_FLUSH_CPU_CACHE();
+
+	status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
+				    (u32)acpi_gbl_FADT.S4bios_request, 8);
+
+	do {
+		acpi_os_stall(1000);
+		status =
+		    acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} while (!in_value);
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
+#endif				/* !ACPI_REDUCED_HARDWARE */
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_sleep_dispatch
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state to enter/exit
+ *              flags               - Optional method flags, passed through
+ *                                    to the selected sleep function
+ *              function_id         - Sleep, wake_prep, or Wake
+ *
+ * RETURN:      Status from the invoked sleep handling function.
+ *
+ * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling
+ *              function.
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
+{
+	acpi_status status;
+	struct acpi_sleep_functions *sleep_functions =
+	    &acpi_sleep_dispatch[function_id];
+
+#if (!ACPI_REDUCED_HARDWARE)
+
+	/*
+	 * If the Hardware Reduced flag is set (from the FADT), or the FADT
+	 * provides an extended sleep control register, we must use the
+	 * extended sleep registers
+	 */
+	if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
+		status = sleep_functions->extended_function(sleep_state, flags);
+	} else {
+		/* Legacy sleep */
+
+		status = sleep_functions->legacy_function(sleep_state, flags);
+	}
+
+	return (status);
+
+#else
+	/*
+	 * For the case where reduced-hardware-only code is being generated,
+	 * we know that only the extended sleep registers are available
+	 */
+	status = sleep_functions->extended_function(sleep_state, flags);
+	return (status);
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enter_sleep_state_prep
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state to enter
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Prepare to enter a system sleep state.
+ *              This function must execute with interrupts enabled.
+ *              We break sleeping into 2 stages so that OSPM can handle
+ *              various OS-specific tasks between the two steps.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
+{
+	acpi_status status;
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+	u32 sst_value;
+
+	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
+
+	status = acpi_get_sleep_type_data(sleep_state,
+					  &acpi_gbl_sleep_type_a,
+					  &acpi_gbl_sleep_type_b);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Execute the _PTS method (Prepare To Sleep) */
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+	arg.type = ACPI_TYPE_INTEGER;
+	arg.integer.value = sleep_state;
+
+	status =
+	    acpi_evaluate_object(NULL, METHOD_PATHNAME__PTS, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Setup the argument to the _SST method (System STatus) */
+
+	switch (sleep_state) {
+	case ACPI_STATE_S0:
+		sst_value = ACPI_SST_WORKING;
+		break;
+
+	case ACPI_STATE_S1:
+	case ACPI_STATE_S2:
+	case ACPI_STATE_S3:
+		sst_value = ACPI_SST_SLEEPING;
+		break;
+
+	case ACPI_STATE_S4:
+		sst_value = ACPI_SST_SLEEP_CONTEXT;
+		break;
+
+	default:
+		sst_value = ACPI_SST_INDICATOR_OFF;	/* Default is off */
+		break;
+	}
+
+	/*
+	 * Set the system indicators to show the desired sleep state.
+	 * _SST is an optional method (return no error if not found)
+	 */
+	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, sst_value);
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enter_sleep_state
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state to enter
+ *              Flags               - ACPI_EXECUTE_GTS to run optional method
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
+
+	if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
+	    (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
+		ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
+			    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
+		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+	}
+
+	status =
+	    acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_leave_sleep_state_prep
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state we are exiting
+ *              Flags               - ACPI_EXECUTE_BFS to run optional method
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform the first stage of OS-independent ACPI cleanup after a
+ *              sleep.
+ *              Called with interrupts DISABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
+
+	status =
+	    acpi_hw_sleep_dispatch(sleep_state, flags,
+				   ACPI_WAKE_PREP_FUNCTION_ID);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state_prep)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_leave_sleep_state
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state we are exiting
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
+ *              Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state(u8 sleep_state)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
+
+	status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
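For orientation, the following is an illustrative sketch (not part of the patch) of how a host OS sleep path is expected to call the entry points exported above, following the interrupt requirements stated in the function headers. The function name example_suspend_to_s3 is invented, and local_irq_disable()/local_irq_enable() stand in for whatever the host actually uses; the flags follow the ACPI_EXECUTE_GTS/ACPI_EXECUTE_BFS descriptions given above.

#include <acpi/acpi.h>
#include <linux/irqflags.h>

static acpi_status example_suspend_to_s3(void)
{
	acpi_status status;

	/* Interrupts still enabled: runs _PTS and sets the _SST indicator */
	status = acpi_enter_sleep_state_prep(ACPI_STATE_S3);
	if (ACPI_FAILURE(status))
		return status;

	local_irq_disable();
	/* ACPI_EXECUTE_GTS also requests the optional _GTS method */
	status = acpi_enter_sleep_state(ACPI_STATE_S3, ACPI_EXECUTE_GTS);

	/* ...platform resumes here, still with interrupts disabled... */
	acpi_leave_sleep_state_prep(ACPI_STATE_S3, ACPI_EXECUTE_BFS);
	local_irq_enable();

	/* Interrupts enabled again: completes the OS-independent wake cleanup */
	return acpi_leave_sleep_state(ACPI_STATE_S3);
}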
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b7f2b3b..3f7f3f6 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -242,7 +242,20 @@
 
 		if (!obj_desc) {
 
-			/* No attached object, we are done */
+			/* No attached object. Some types should always have an object */
+
+			switch (type) {
+			case ACPI_TYPE_INTEGER:
+			case ACPI_TYPE_PACKAGE:
+			case ACPI_TYPE_BUFFER:
+			case ACPI_TYPE_STRING:
+			case ACPI_TYPE_METHOD:
+				acpi_os_printf("<No attached object>");
+				break;
+
+			default:
+				break;
+			}
 
 			acpi_os_printf("\n");
 			return (AE_OK);
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 30ea5bc..3b5acb0 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -121,7 +121,7 @@
 		return;
 	}
 
-	status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle);
+	status = acpi_get_handle(NULL, METHOD_NAME__SB_, &sys_bus_handle);
 	if (ACPI_FAILURE(status)) {
 		return;
 	}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index bbe46a4..23ce096 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -638,8 +638,8 @@
 			/* Create the new outer package and populate it */
 
 			status =
-			    acpi_ns_repair_package_list(data,
-							return_object_ptr);
+			    acpi_ns_wrap_with_package(data, *elements,
+						      return_object_ptr);
 			if (ACPI_FAILURE(status)) {
 				return (status);
 			}
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 9c35d20..5519a64 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -71,11 +71,10 @@
  * Buffer  -> String
  * Buffer  -> Package of Integers
  * Package -> Package of one Package
+ * An incorrect standalone object is wrapped with a required outer Package
  *
  * Additional possible repairs:
- *
  * Required package elements that are NULL replaced by Integer/String/Buffer
- * Incorrect standalone package wrapped with required outer package
  *
  ******************************************************************************/
 /* Local prototypes */
@@ -91,10 +90,6 @@
 acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
 			  union acpi_operand_object **return_object);
 
-static acpi_status
-acpi_ns_convert_to_package(union acpi_operand_object *original_object,
-			   union acpi_operand_object **return_object);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_repair_object
@@ -151,9 +146,24 @@
 		}
 	}
 	if (expected_btypes & ACPI_RTYPE_PACKAGE) {
-		status = acpi_ns_convert_to_package(return_object, &new_object);
+		/*
+		 * A package is expected. We will wrap the existing object with a
+		 * new package object. It is often the case that if a variable-length
+		 * package is required, but there is only a single object needed, the
+		 * BIOS will return that object instead of wrapping it with a Package
+		 * object. Note: after the wrapping, the package will be validated
+		 * for correct contents (expected object type or types).
+		 */
+		status =
+		    acpi_ns_wrap_with_package(data, return_object, &new_object);
 		if (ACPI_SUCCESS(status)) {
-			goto object_repaired;
+			/*
+			 * The original object just had its reference count
+			 * incremented for being inserted into the new package.
+			 */
+			*return_object_ptr = new_object;	/* New Package object */
+			data->flags |= ACPI_OBJECT_REPAIRED;
+			return (AE_OK);
 		}
 	}
 
@@ -165,22 +175,27 @@
 
 	/* Object was successfully repaired */
 
-	/*
-	 * If the original object is a package element, we need to:
-	 * 1. Set the reference count of the new object to match the
-	 *    reference count of the old object.
-	 * 2. Decrement the reference count of the original object.
-	 */
 	if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
-		new_object->common.reference_count =
-		    return_object->common.reference_count;
+		/*
+		 * The original object is a package element. We need to
+		 * decrement the reference count of the original object,
+		 * for removing it from the package.
+		 *
+		 * However, if the original object was just wrapped with a
+		 * package object as part of the repair, we don't need to
+		 * change the reference count.
+		 */
+		if (!(data->flags & ACPI_OBJECT_WRAPPED)) {
+			new_object->common.reference_count =
+			    return_object->common.reference_count;
 
-		if (return_object->common.reference_count > 1) {
-			return_object->common.reference_count--;
+			if (return_object->common.reference_count > 1) {
+				return_object->common.reference_count--;
+			}
 		}
 
 		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
-				  "%s: Converted %s to expected %s at index %u\n",
+				  "%s: Converted %s to expected %s at Package index %u\n",
 				  data->pathname,
 				  acpi_ut_get_object_type_name(return_object),
 				  acpi_ut_get_object_type_name(new_object),
@@ -453,65 +468,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ns_convert_to_package
- *
- * PARAMETERS:  original_object     - Object to be converted
- *              return_object       - Where the new converted object is returned
- *
- * RETURN:      Status. AE_OK if conversion was successful.
- *
- * DESCRIPTION: Attempt to convert a Buffer object to a Package. Each byte of
- *              the buffer is converted to a single integer package element.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_convert_to_package(union acpi_operand_object *original_object,
-			   union acpi_operand_object **return_object)
-{
-	union acpi_operand_object *new_object;
-	union acpi_operand_object **elements;
-	u32 length;
-	u8 *buffer;
-
-	switch (original_object->common.type) {
-	case ACPI_TYPE_BUFFER:
-
-		/* Buffer-to-Package conversion */
-
-		length = original_object->buffer.length;
-		new_object = acpi_ut_create_package_object(length);
-		if (!new_object) {
-			return (AE_NO_MEMORY);
-		}
-
-		/* Convert each buffer byte to an integer package element */
-
-		elements = new_object->package.elements;
-		buffer = original_object->buffer.pointer;
-
-		while (length--) {
-			*elements =
-			    acpi_ut_create_integer_object((u64) *buffer);
-			if (!*elements) {
-				acpi_ut_remove_reference(new_object);
-				return (AE_NO_MEMORY);
-			}
-			elements++;
-			buffer++;
-		}
-		break;
-
-	default:
-		return (AE_AML_OPERAND_TYPE);
-	}
-
-	*return_object = new_object;
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ns_repair_null_element
  *
  * PARAMETERS:  Data                - Pointer to validation data structure
@@ -677,55 +633,56 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ns_repair_package_list
+ * FUNCTION:    acpi_ns_wrap_with_package
  *
  * PARAMETERS:  Data                - Pointer to validation data structure
- *              obj_desc_ptr        - Pointer to the object to repair. The new
- *                                    package object is returned here,
- *                                    overwriting the old object.
+ *              original_object     - Pointer to the object to repair.
+ *              obj_desc_ptr        - The new package object is returned here
  *
  * RETURN:      Status, new object in *obj_desc_ptr
  *
- * DESCRIPTION: Repair a common problem with objects that are defined to return
- *              a variable-length Package of Packages. If the variable-length
- *              is one, some BIOS code mistakenly simply declares a single
- *              Package instead of a Package with one sub-Package. This
- *              function attempts to repair this error by wrapping a Package
- *              object around the original Package, creating the correct
- *              Package with one sub-Package.
+ * DESCRIPTION: Repair a common problem with objects that are defined to
+ *              return a variable-length Package of sub-objects. If there is
+ *              only one sub-object, some BIOS code mistakenly simply declares
+ *              the single object instead of a Package with one sub-object.
+ *              This function attempts to repair this error by wrapping a
+ *              Package object around the original object, creating the
+ *              correct and expected Package with one sub-object.
  *
  *              Names that can be repaired in this manner include:
- *              _ALR, _CSD, _HPX, _MLS, _PRT, _PSS, _TRT, TSS
+ *              _ALR, _CSD, _HPX, _MLS, _PLD, _PRT, _PSS, _TRT, _TSS,
+ *              _BCL, _DOD, _FIX, _Sx
  *
  ******************************************************************************/
 
 acpi_status
-acpi_ns_repair_package_list(struct acpi_predefined_data *data,
-			    union acpi_operand_object **obj_desc_ptr)
+acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
+			  union acpi_operand_object *original_object,
+			  union acpi_operand_object **obj_desc_ptr)
 {
 	union acpi_operand_object *pkg_obj_desc;
 
-	ACPI_FUNCTION_NAME(ns_repair_package_list);
+	ACPI_FUNCTION_NAME(ns_wrap_with_package);
 
 	/*
 	 * Create the new outer package and populate it. The new package will
-	 * have a single element, the lone subpackage.
+	 * have a single element, the lone sub-object.
 	 */
 	pkg_obj_desc = acpi_ut_create_package_object(1);
 	if (!pkg_obj_desc) {
 		return (AE_NO_MEMORY);
 	}
 
-	pkg_obj_desc->package.elements[0] = *obj_desc_ptr;
+	pkg_obj_desc->package.elements[0] = original_object;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+			  "%s: Wrapped %s with expected Package object\n",
+			  data->pathname,
+			  acpi_ut_get_object_type_name(original_object)));
 
 	/* Return the new object in the object pointer */
 
 	*obj_desc_ptr = pkg_obj_desc;
-	data->flags |= ACPI_OBJECT_REPAIRED;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
-			  "%s: Repaired incorrectly formed Package\n",
-			  data->pathname));
-
+	data->flags |= ACPI_OBJECT_REPAIRED | ACPI_OBJECT_WRAPPED;
 	return (AE_OK);
 }
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a535b7a..7511375 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -341,7 +341,7 @@
 
 		if (!acpi_ns_valid_path_separator(*external_name) &&
 		    (*external_name != 0)) {
-			return_ACPI_STATUS(AE_BAD_PARAMETER);
+			return_ACPI_STATUS(AE_BAD_PATHNAME);
 		}
 
 		/* Move on the next segment */
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index c5d8704..4c9c760 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -363,10 +363,6 @@
 	u32 address32;
 	u32 i;
 
-	/* Update the local FADT table header length */
-
-	acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
-
 	/*
 	 * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
 	 * Later code will always use the X 64-bit field. Also, check for an
@@ -408,6 +404,10 @@
 		acpi_gbl_FADT.boot_flags = 0;
 	}
 
+	/* Update the local FADT table header length */
+
+	acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
+
 	/*
 	 * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
 	 * generic address structures as necessary. Later code will always use
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 1aecf7b..c03500b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -114,7 +114,6 @@
 {
 	u32 i;
 	acpi_status status = AE_OK;
-	struct acpi_table_header *override_table = NULL;
 
 	ACPI_FUNCTION_TRACE(tb_add_table);
 
@@ -224,25 +223,10 @@
 	/*
 	 * ACPI Table Override:
 	 * Allow the host to override dynamically loaded tables.
+	 * NOTE: the table is fully mapped at this point, and the mapping will
+	 * be deleted by tb_table_override if the table is actually overridden.
 	 */
-	status = acpi_os_table_override(table_desc->pointer, &override_table);
-	if (ACPI_SUCCESS(status) && override_table) {
-		ACPI_INFO((AE_INFO,
-			   "%4.4s @ 0x%p Table override, replaced with:",
-			   table_desc->pointer->signature,
-			   ACPI_CAST_PTR(void, table_desc->address)));
-
-		/* We can delete the table that was passed as a parameter */
-
-		acpi_tb_delete_table(table_desc);
-
-		/* Setup descriptor for the new table */
-
-		table_desc->address = ACPI_PTR_TO_PHYSADDR(override_table);
-		table_desc->pointer = override_table;
-		table_desc->length = override_table->length;
-		table_desc->flags = ACPI_TABLE_ORIGIN_OVERRIDE;
-	}
+	(void)acpi_tb_table_override(table_desc->pointer, table_desc);
 
 	/* Add the table to the global root table list */
 
@@ -263,6 +247,95 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_tb_table_override
+ *
+ * PARAMETERS:  table_header        - Header for the original table
+ *              table_desc          - Table descriptor initialized for the
+ *                                    original table. May or may not be mapped.
+ *
+ * RETURN:      Pointer to the entire new table. NULL if table not overridden.
+ *              If overridden, installs the new table within the input table
+ *              descriptor.
+ *
+ * DESCRIPTION: Attempt table override by calling the OSL override functions.
+ *              Note: If the table is overridden, then the entire new table
+ *              is mapped and returned by this function.
+ *
+ ******************************************************************************/
+
+struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
+						 *table_header,
+						 struct acpi_table_desc
+						 *table_desc)
+{
+	acpi_status status;
+	struct acpi_table_header *new_table = NULL;
+	acpi_physical_address new_address = 0;
+	u32 new_table_length = 0;
+	u8 new_flags;
+	char *override_type;
+
+	/* (1) Attempt logical override (returns a logical address) */
+
+	status = acpi_os_table_override(table_header, &new_table);
+	if (ACPI_SUCCESS(status) && new_table) {
+		new_address = ACPI_PTR_TO_PHYSADDR(new_table);
+		new_table_length = new_table->length;
+		new_flags = ACPI_TABLE_ORIGIN_OVERRIDE;
+		override_type = "Logical";
+		goto finish_override;
+	}
+
+	/* (2) Attempt physical override (returns a physical address) */
+
+	status = acpi_os_physical_table_override(table_header,
+						 &new_address,
+						 &new_table_length);
+	if (ACPI_SUCCESS(status) && new_address && new_table_length) {
+
+		/* Map the entire new table */
+
+		new_table = acpi_os_map_memory(new_address, new_table_length);
+		if (!new_table) {
+			ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+					"%4.4s %p Attempted physical table override failed",
+					table_header->signature,
+					ACPI_CAST_PTR(void,
+						      table_desc->address)));
+			return (NULL);
+		}
+
+		override_type = "Physical";
+		new_flags = ACPI_TABLE_ORIGIN_MAPPED;
+		goto finish_override;
+	}
+
+	return (NULL);		/* There was no override */
+
+      finish_override:
+
+	ACPI_INFO((AE_INFO,
+		   "%4.4s %p %s table override, new table: %p",
+		   table_header->signature,
+		   ACPI_CAST_PTR(void, table_desc->address),
+		   override_type, new_table));
+
+	/* We can now unmap/delete the original table (if fully mapped) */
+
+	acpi_tb_delete_table(table_desc);
+
+	/* Setup descriptor for the new table */
+
+	table_desc->address = new_address;
+	table_desc->pointer = new_table;
+	table_desc->length = new_table_length;
+	table_desc->flags = new_flags;
+
+	return (new_table);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_tb_resize_root_table_list
  *
  * PARAMETERS:  None
@@ -396,7 +469,11 @@
 	case ACPI_TABLE_ORIGIN_ALLOCATED:
 		ACPI_FREE(table_desc->pointer);
 		break;
-	default:;
+
+		/* Not mapped or allocated, there is nothing we can do */
+
+	default:
+		return;
 	}
 
 	table_desc->pointer = NULL;
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 09ca39e..0a706ca 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -118,6 +118,7 @@
 		return AE_OK;
 }
 
+#if (!ACPI_REDUCED_HARDWARE)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_tb_initialize_facs
@@ -148,6 +149,7 @@
 								&acpi_gbl_FACS));
 	return status;
 }
+#endif				/* !ACPI_REDUCED_HARDWARE */
 
 /*******************************************************************************
  *
@@ -444,7 +446,7 @@
  * RETURN:      None
  *
  * DESCRIPTION: Install an ACPI table into the global data structure. The
- *              table override mechanism is implemented here to allow the host
+ *              table override mechanism is called to allow the host
  *              OS to replace any table before it is installed in the root
  *              table array.
  *
@@ -454,11 +456,9 @@
 acpi_tb_install_table(acpi_physical_address address,
 		      char *signature, u32 table_index)
 {
-	u8 flags;
-	acpi_status status;
-	struct acpi_table_header *table_to_install;
-	struct acpi_table_header *mapped_table;
-	struct acpi_table_header *override_table = NULL;
+	struct acpi_table_header *table;
+	struct acpi_table_header *final_table;
+	struct acpi_table_desc *table_desc;
 
 	if (!address) {
 		ACPI_ERROR((AE_INFO,
@@ -469,69 +469,78 @@
 
 	/* Map just the table header */
 
-	mapped_table =
-	    acpi_os_map_memory(address, sizeof(struct acpi_table_header));
-	if (!mapped_table) {
+	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+	if (!table) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not map memory for table [%s] at %p",
+			    signature, ACPI_CAST_PTR(void, address)));
 		return;
 	}
 
 	/* If a particular signature is expected (DSDT/FACS), it must match */
 
-	if (signature && !ACPI_COMPARE_NAME(mapped_table->signature, signature)) {
+	if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
 		ACPI_ERROR((AE_INFO,
 			    "Invalid signature 0x%X for ACPI table, expected [%s]",
-			    *ACPI_CAST_PTR(u32, mapped_table->signature),
-			    signature));
+			    *ACPI_CAST_PTR(u32, table->signature), signature));
 		goto unmap_and_exit;
 	}
 
 	/*
+	 * Initialize the table entry. Set the pointer to NULL, since the
+	 * table is not fully mapped at this time.
+	 */
+	table_desc = &acpi_gbl_root_table_list.tables[table_index];
+
+	table_desc->address = address;
+	table_desc->pointer = NULL;
+	table_desc->length = table->length;
+	table_desc->flags = ACPI_TABLE_ORIGIN_MAPPED;
+	ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
+
+	/*
 	 * ACPI Table Override:
 	 *
 	 * Before we install the table, let the host OS override it with a new
 	 * one if desired. Any table within the RSDT/XSDT can be replaced,
 	 * including the DSDT which is pointed to by the FADT.
+	 *
+	 * NOTE: If the table is overridden, then final_table will contain a
+	 * mapped pointer to the full new table. If the table is not overridden,
+	 * or if there has been a physical override, then the table will be
+	 * fully mapped later (in verify table). In any case, we must
+	 * unmap the header that was mapped above.
 	 */
-	status = acpi_os_table_override(mapped_table, &override_table);
-	if (ACPI_SUCCESS(status) && override_table) {
-		ACPI_INFO((AE_INFO,
-			   "%4.4s @ 0x%p Table override, replaced with:",
-			   mapped_table->signature, ACPI_CAST_PTR(void,
-								  address)));
-
-		acpi_gbl_root_table_list.tables[table_index].pointer =
-		    override_table;
-		address = ACPI_PTR_TO_PHYSADDR(override_table);
-
-		table_to_install = override_table;
-		flags = ACPI_TABLE_ORIGIN_OVERRIDE;
-	} else {
-		table_to_install = mapped_table;
-		flags = ACPI_TABLE_ORIGIN_MAPPED;
+	final_table = acpi_tb_table_override(table, table_desc);
+	if (!final_table) {
+		final_table = table;	/* There was no override */
 	}
 
-	/* Initialize the table entry */
+	acpi_tb_print_table_header(table_desc->address, final_table);
 
-	acpi_gbl_root_table_list.tables[table_index].address = address;
-	acpi_gbl_root_table_list.tables[table_index].length =
-	    table_to_install->length;
-	acpi_gbl_root_table_list.tables[table_index].flags = flags;
-
-	ACPI_MOVE_32_TO_32(&
-			   (acpi_gbl_root_table_list.tables[table_index].
-			    signature), table_to_install->signature);
-
-	acpi_tb_print_table_header(address, table_to_install);
+	/* Set the global integer width (based upon revision of the DSDT) */
 
 	if (table_index == ACPI_TABLE_INDEX_DSDT) {
+		acpi_ut_set_integer_width(final_table->revision);
+	}
 
-		/* Global integer width is based upon revision of the DSDT */
-
-		acpi_ut_set_integer_width(table_to_install->revision);
+	/*
+	 * If we have a physical override during this early loading of the ACPI
+	 * tables, unmap the table for now. It will be mapped again later when
+	 * it is actually used. This supports very early loading of ACPI tables,
+	 * before virtual memory is fully initialized and running within the
+	 * host OS. Note: A logical override has the ACPI_TABLE_ORIGIN_OVERRIDE
+	 * flag set and will not be deleted below.
+	 */
+	if (final_table != table) {
+		acpi_tb_delete_table(table_desc);
 	}
 
       unmap_and_exit:
-	acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header));
+
+	/* Always unmap the table header that we mapped above */
+
+	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index d42ede52..6848499 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -497,19 +497,20 @@
 
 /* Names for Notify() values, used for debug output */
 
-static const char *acpi_gbl_notify_value_names[] = {
-	"Bus Check",
-	"Device Check",
-	"Device Wake",
-	"Eject Request",
-	"Device Check Light",
-	"Frequency Mismatch",
-	"Bus Mode Mismatch",
-	"Power Fault",
-	"Capabilities Check",
-	"Device PLD Check",
-	"Reserved",
-	"System Locality Update"
+static const char *acpi_gbl_notify_value_names[ACPI_NOTIFY_MAX + 1] = {
+	/* 00 */ "Bus Check",
+	/* 01 */ "Device Check",
+	/* 02 */ "Device Wake",
+	/* 03 */ "Eject Request",
+	/* 04 */ "Device Check Light",
+	/* 05 */ "Frequency Mismatch",
+	/* 06 */ "Bus Mode Mismatch",
+	/* 07 */ "Power Fault",
+	/* 08 */ "Capabilities Check",
+	/* 09 */ "Device PLD Check",
+	/* 10 */ "Reserved",
+	/* 11 */ "System Locality Update",
+	/* 12 */ "Shutdown Request"
 };
 
 const char *acpi_ut_get_notify_name(u32 notify_value)
@@ -519,9 +520,10 @@
 		return (acpi_gbl_notify_value_names[notify_value]);
 	} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
 		return ("Reserved");
-	} else {		/* Greater or equal to 0x80 */
-
-		return ("**Device Specific**");
+	} else if (notify_value <= ACPI_MAX_DEVICE_SPECIFIC_NOTIFY) {
+		return ("Device Specific");
+	} else {
+		return ("Hardware Specific");
 	}
 }
 #endif
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 4153584..90f53b4 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -140,6 +140,7 @@
 	{NULL, ACPI_TYPE_ANY, NULL}
 };
 
+#if (!ACPI_REDUCED_HARDWARE)
 /******************************************************************************
  *
  * Event and Hardware globals
@@ -236,6 +237,7 @@
 					ACPI_BITMASK_RT_CLOCK_STATUS,
 					ACPI_BITMASK_RT_CLOCK_ENABLE},
 };
+#endif				/* !ACPI_REDUCED_HARDWARE */
 
 /*******************************************************************************
  *
@@ -286,6 +288,8 @@
 
 	acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
 
+#if (!ACPI_REDUCED_HARDWARE)
+
 	/* GPE support */
 
 	acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -294,6 +298,10 @@
 	acpi_current_gpe_count = 0;
 	acpi_gbl_all_gpes_initialized = FALSE;
 
+	acpi_gbl_global_event_handler = NULL;
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
+
 	/* Global handlers */
 
 	acpi_gbl_system_notify.handler = NULL;
@@ -302,7 +310,6 @@
 	acpi_gbl_init_handler = NULL;
 	acpi_gbl_table_handler = NULL;
 	acpi_gbl_interface_handler = NULL;
-	acpi_gbl_global_event_handler = NULL;
 
 	/* Global Lock support */
 
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 8359c0c..246798e 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -53,27 +53,35 @@
 /* Local prototypes */
 static void acpi_ut_terminate(void);
 
+#if (!ACPI_REDUCED_HARDWARE)
+
+static void acpi_ut_free_gpe_lists(void);
+
+#else
+
+#define acpi_ut_free_gpe_lists()
+#endif				/* !ACPI_REDUCED_HARDWARE */
+
+#if (!ACPI_REDUCED_HARDWARE)
 /******************************************************************************
  *
- * FUNCTION:    acpi_ut_terminate
+ * FUNCTION:    acpi_ut_free_gpe_lists
  *
  * PARAMETERS:  none
  *
  * RETURN:      none
  *
- * DESCRIPTION: Free global memory
+ * DESCRIPTION: Free global GPE lists
  *
  ******************************************************************************/
 
-static void acpi_ut_terminate(void)
+static void acpi_ut_free_gpe_lists(void)
 {
 	struct acpi_gpe_block_info *gpe_block;
 	struct acpi_gpe_block_info *next_gpe_block;
 	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
 	struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
 
-	ACPI_FUNCTION_TRACE(ut_terminate);
-
 	/* Free global GPE blocks and related info structures */
 
 	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
@@ -91,7 +99,26 @@
 		ACPI_FREE(gpe_xrupt_info);
 		gpe_xrupt_info = next_gpe_xrupt_info;
 	}
+}
+#endif				/* !ACPI_REDUCED_HARDWARE */
 
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_terminate
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      none
+ *
+ * DESCRIPTION: Free global memory
+ *
+ ******************************************************************************/
+
+static void acpi_ut_terminate(void)
+{
+	ACPI_FUNCTION_TRACE(ut_terminate);
+
+	acpi_ut_free_gpe_lists();
 	acpi_ut_delete_address_lists();
 	return_VOID;
 }
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 644e8c8..afa94f5 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -145,6 +145,8 @@
 
 	ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
 
+#if (!ACPI_REDUCED_HARDWARE)
+
 	/* Enable ACPI mode */
 
 	if (!(flags & ACPI_NO_ACPI_ENABLE)) {
@@ -169,6 +171,7 @@
 		ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
 		return_ACPI_STATUS(status);
 	}
+#endif				/* !ACPI_REDUCED_HARDWARE */
 
 	/*
 	 * Install the default op_region handlers. These are installed unless
@@ -184,7 +187,7 @@
 			return_ACPI_STATUS(status);
 		}
 	}
-
+#if (!ACPI_REDUCED_HARDWARE)
 	/*
 	 * Initialize ACPI Event handling (Fixed and General Purpose)
 	 *
@@ -220,6 +223,7 @@
 			return_ACPI_STATUS(status);
 		}
 	}
+#endif				/* !ACPI_REDUCED_HARDWARE */
 
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index e5d53b7..5577762 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -558,33 +558,48 @@
 }
 EXPORT_SYMBOL_GPL(apei_resources_release);
 
-static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
+				u32 *access_bit_width)
 {
-	u32 width, space_id;
+	u32 bit_width, bit_offset, access_size_code, space_id;
 
-	width = reg->bit_width;
+	bit_width = reg->bit_width;
+	bit_offset = reg->bit_offset;
+	access_size_code = reg->access_width;
 	space_id = reg->space_id;
 	/* Handle possible alignment issues */
 	memcpy(paddr, &reg->address, sizeof(*paddr));
 	if (!*paddr) {
 		pr_warning(FW_BUG APEI_PFX
-			   "Invalid physical address in GAR [0x%llx/%u/%u]\n",
-			   *paddr, width, space_id);
+			   "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
+			   *paddr, bit_width, bit_offset, access_size_code,
+			   space_id);
 		return -EINVAL;
 	}
 
-	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+	if (access_size_code < 1 || access_size_code > 4) {
 		pr_warning(FW_BUG APEI_PFX
-			   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
-			   *paddr, width, space_id);
+			   "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
+			   *paddr, bit_width, bit_offset, access_size_code,
+			   space_id);
+		return -EINVAL;
+	}
+	*access_bit_width = 1UL << (access_size_code + 2);
+
+	if ((bit_width + bit_offset) > *access_bit_width) {
+		pr_warning(FW_BUG APEI_PFX
+			   "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
+			   *paddr, bit_width, bit_offset, access_size_code,
+			   space_id);
 		return -EINVAL;
 	}
 
 	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
 	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
 		pr_warning(FW_BUG APEI_PFX
-			   "Invalid address space type in GAR [0x%llx/%u/%u]\n",
-			   *paddr, width, space_id);
+			   "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
+			   *paddr, bit_width, bit_offset, access_size_code,
+			   space_id);
 		return -EINVAL;
 	}
 
@@ -595,23 +610,25 @@
 int apei_read(u64 *val, struct acpi_generic_address *reg)
 {
 	int rc;
+	u32 access_bit_width;
 	u64 address;
 	acpi_status status;
 
-	rc = apei_check_gar(reg, &address);
+	rc = apei_check_gar(reg, &address, &access_bit_width);
 	if (rc)
 		return rc;
 
 	*val = 0;
 	switch(reg->space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-		status = acpi_os_read_memory64((acpi_physical_address)
-					     address, val, reg->bit_width);
+		status = acpi_os_read_memory((acpi_physical_address) address,
+					       val, access_bit_width);
 		if (ACPI_FAILURE(status))
 			return -EIO;
 		break;
 	case ACPI_ADR_SPACE_SYSTEM_IO:
-		status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
+		status = acpi_os_read_port(address, (u32 *)val,
+					   access_bit_width);
 		if (ACPI_FAILURE(status))
 			return -EIO;
 		break;
@@ -627,22 +644,23 @@
 int apei_write(u64 val, struct acpi_generic_address *reg)
 {
 	int rc;
+	u32 access_bit_width;
 	u64 address;
 	acpi_status status;
 
-	rc = apei_check_gar(reg, &address);
+	rc = apei_check_gar(reg, &address, &access_bit_width);
 	if (rc)
 		return rc;
 
 	switch (reg->space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-		status = acpi_os_write_memory64((acpi_physical_address)
-					      address, val, reg->bit_width);
+		status = acpi_os_write_memory((acpi_physical_address) address,
+						val, access_bit_width);
 		if (ACPI_FAILURE(status))
 			return -EIO;
 		break;
 	case ACPI_ADR_SPACE_SYSTEM_IO:
-		status = acpi_os_write_port(address, val, reg->bit_width);
+		status = acpi_os_write_port(address, val, access_bit_width);
 		if (ACPI_FAILURE(status))
 			return -EIO;
 		break;
@@ -661,23 +679,24 @@
 	struct apei_resources *resources = data;
 	struct acpi_generic_address *reg = &entry->register_region;
 	u8 ins = entry->instruction;
+	u32 access_bit_width;
 	u64 paddr;
 	int rc;
 
 	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
 		return 0;
 
-	rc = apei_check_gar(reg, &paddr);
+	rc = apei_check_gar(reg, &paddr, &access_bit_width);
 	if (rc)
 		return rc;
 
 	switch (reg->space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
 		return apei_res_add(&resources->iomem, paddr,
-				    reg->bit_width / 8);
+				    access_bit_width / 8);
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		return apei_res_add(&resources->ioport, paddr,
-				    reg->bit_width / 8);
+				    access_bit_width / 8);
 	default:
 		return -EINVAL;
 	}
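The GAR checks above rely on the fact that the access size code encodes the register access width as 1 << (code + 2) bits, so codes 1 through 4 mean 8/16/32/64-bit accesses, and bit_width + bit_offset must fit inside one such access. A minimal standalone sketch of that arithmetic (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int code;

	/* GAR access size codes 1..4 map to 8/16/32/64-bit accesses */
	for (code = 1; code <= 4; code++)
		printf("access_size_code %u -> %u-bit access\n",
		       code, 1U << (code + 2));

	/* e.g. bit_width 16 at bit_offset 4 spans 20 bits and therefore
	 * needs access_size_code >= 3 (a 32-bit access) to pass the check */
	return 0;
}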
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 5d41894..e6defd8 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -362,6 +362,7 @@
 		gedata_len = gdata->error_data_length;
 		apei_estatus_print_section(pfx, gdata, sec_no);
 		data_len -= gedata_len + sizeof(*gdata);
+		gdata = (void *)(gdata + 1) + gedata_len;
 		sec_no++;
 	}
 }
@@ -396,6 +397,7 @@
 		if (gedata_len > data_len - sizeof(*gdata))
 			return -EINVAL;
 		data_len -= gedata_len + sizeof(*gdata);
+		gdata = (void *)(gdata + 1) + gedata_len;
 	}
 	if (data_len)
 		return -EINVAL;
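Both hunks apply the same fix: after handling one generic error data section, the cursor has to advance past the fixed section header and its variable-length payload before the next iteration. A small sketch of that pointer arithmetic (not part of the patch; the helper name is made up):

#include <acpi/actbl1.h>

/* Next section = end of the fixed header + error_data_length payload bytes */
static struct acpi_hest_generic_data *
example_next_section(struct acpi_hest_generic_data *gdata)
{
	return (void *)(gdata + 1) + gdata->error_data_length;
}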
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 4ca087d..8e17936 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -74,6 +74,8 @@
 	u8	reserved[3];
 };
 
+static u32 notrigger;
+
 static u32 vendor_flags;
 static struct debugfs_blob_wrapper vendor_blob;
 static char vendor_dev[64];
@@ -238,7 +240,7 @@
 			return v5param;
 		}
 	}
-	if (paddrv4) {
+	if (param_extension && paddrv4) {
 		struct einj_parameter *v4param;
 
 		v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
@@ -496,9 +498,11 @@
 	if (rc)
 		return rc;
 	trigger_paddr = apei_exec_ctx_get_output(&ctx);
-	rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
-	if (rc)
-		return rc;
+	if (notrigger == 0) {
+		rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
+		if (rc)
+			return rc;
+	}
 	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
 
 	return rc;
@@ -700,6 +704,11 @@
 					    einj_debug_dir, &error_param2);
 		if (!fentry)
 			goto err_unmap;
+
+		fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &notrigger);
+		if (!fentry)
+			goto err_unmap;
 	}
 
 	if (vendor_dev[0]) {
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index eb9fab5..e4d9d24 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -917,7 +917,7 @@
 {
 	if ((erst_tab->header_length !=
 	     (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
-	    && (erst_tab->header_length != sizeof(struct acpi_table_einj)))
+	    && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
 		return -EINVAL;
 	if (erst_tab->header.length < sizeof(struct acpi_table_erst))
 		return -EINVAL;
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
new file mode 100644
index 0000000..8cf6c46
--- /dev/null
+++ b/drivers/acpi/bgrt.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Red Hat, Inc <mjg@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+
+static struct acpi_table_bgrt *bgrt_tab;
+static struct kobject *bgrt_kobj;
+
+struct bmp_header {
+	u16 id;
+	u32 size;
+} __attribute ((packed));
+
+static struct bmp_header bmp_header;
+
+static ssize_t show_version(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version);
+}
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+
+static ssize_t show_status(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status);
+}
+static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+
+static ssize_t show_type(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type);
+}
+static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
+
+static ssize_t show_xoffset(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x);
+}
+static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
+
+static ssize_t show_yoffset(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y);
+}
+static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
+
+static ssize_t show_image(struct file *file, struct kobject *kobj,
+	       struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+	int size = attr->size;
+	void __iomem *image = attr->private;
+
+	if (off >= size) {
+		count = 0;
+	} else {
+		if (off + count > size)
+			count = size - off;
+
+		memcpy_fromio(buf, image+off, count);
+	}
+
+	return count;
+}
+
+static struct bin_attribute image_attr = {
+	.attr = {
+		.name = "image",
+		.mode = S_IRUGO,
+	},
+	.read = show_image,
+};
+
+static struct attribute *bgrt_attributes[] = {
+	&dev_attr_version.attr,
+	&dev_attr_status.attr,
+	&dev_attr_type.attr,
+	&dev_attr_xoffset.attr,
+	&dev_attr_yoffset.attr,
+	NULL,
+};
+
+static struct attribute_group bgrt_attribute_group = {
+	.attrs = bgrt_attributes,
+};
+
+static int __init bgrt_init(void)
+{
+	acpi_status status;
+	int ret;
+	void __iomem *bgrt;
+
+	if (acpi_disabled)
+		return -ENODEV;
+
+	status = acpi_get_table("BGRT", 0,
+				(struct acpi_table_header **)&bgrt_tab);
+
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	sysfs_bin_attr_init(&image_attr);
+
+	bgrt = ioremap(bgrt_tab->image_address, sizeof(struct bmp_header));
+
+	if (!bgrt) {
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	memcpy_fromio(&bmp_header, bgrt, sizeof(bmp_header));
+	image_attr.size = bmp_header.size;
+	iounmap(bgrt);
+
+	image_attr.private = ioremap(bgrt_tab->image_address, image_attr.size);
+
+	if (!image_attr.private) {
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
+	if (!bgrt_kobj) {
+		ret = -EINVAL;
+		goto out_iounmap;
+	}
+
+	ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group);
+	if (ret)
+		goto out_kobject;
+
+	ret = sysfs_create_bin_file(bgrt_kobj, &image_attr);
+	if (ret)
+		goto out_group;
+
+	return 0;
+
+out_group:
+	sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
+out_kobject:
+	kobject_put(bgrt_kobj);
+out_iounmap:
+	iounmap(image_attr.private);
+out_err:
+	return ret;
+}
+
+static void __exit bgrt_exit(void)
+{
+	sysfs_remove_bin_file(bgrt_kobj, &image_attr);
+	sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
+	kobject_put(bgrt_kobj);
+	iounmap(image_attr.private);
+}
+
+module_init(bgrt_init);
+module_exit(bgrt_exit);
+
+MODULE_AUTHOR("Matthew Garrett");
+MODULE_DESCRIPTION("BGRT boot graphic support");
+MODULE_LICENSE("GPL");
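Since bgrt_kobj is parented to acpi_kobj, the attributes created above should appear under /sys/firmware/acpi/bgrt/. A rough userspace sketch of a consumer (not part of the patch; the path and read behaviour are assumptions based on the driver code, not verified here):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/acpi/bgrt/status", "r");
	int status = 0;

	if (!f)
		return 1;	/* no BGRT table exposed on this system */
	if (fscanf(f, "%d", &status) != 1)
		status = 0;
	fclose(f);

	printf("firmware boot splash is %s\n",
	       status ? "valid" : "invalidated");
	return 0;
}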
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9ecec98..3263b68 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1010,6 +1010,7 @@
 }
 
 struct kobject *acpi_kobj;
+EXPORT_SYMBOL_GPL(acpi_kobj);
 
 static int __init acpi_init(void)
 {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e37615f..7edaccc 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -822,10 +822,10 @@
 		first_ec = ec;
 	device->driver_data = ec;
 
-	WARN(!request_region(ec->data_addr, 1, "EC data"),
-	     "Could not request EC data io port 0x%lx", ec->data_addr);
-	WARN(!request_region(ec->command_addr, 1, "EC cmd"),
-	     "Could not request EC cmd io port 0x%lx", ec->command_addr);
+	ret = !!request_region(ec->data_addr, 1, "EC data");
+	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
+	ret = !!request_region(ec->command_addr, 1, "EC cmd");
+	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
 	pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
 			  ec->gpe, ec->command_addr, ec->data_addr);
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index b258cab..7586544 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -27,12 +27,6 @@
 
 static struct dentry *acpi_ec_debugfs_dir;
 
-static int acpi_ec_open_io(struct inode *i, struct file *f)
-{
-	f->private_data = i->i_private;
-	return 0;
-}
-
 static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
 			       size_t count, loff_t *off)
 {
@@ -95,7 +89,7 @@
 
 static const struct file_operations acpi_ec_io_ops = {
 	.owner = THIS_MODULE,
-	.open  = acpi_ec_open_io,
+	.open  = simple_open,
 	.read  = acpi_ec_read_io,
 	.write = acpi_ec_write_io,
 	.llseek = default_llseek,
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 7a2035f..266bc58 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -95,8 +95,8 @@
 {
 	struct nvs_page *entry, *next;
 
-	pr_info("PM: Registering ACPI NVS region at %lx (%ld bytes)\n",
-		start, size);
+	pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
+		start, start + size - 1, size);
 
 	while (size > 0) {
 		unsigned int nr_bytes;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 412a1e0..ba14fb9 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -77,6 +77,9 @@
 extern char line_buf[80];
 #endif				/*ENABLE_DEBUGGER */
 
+static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
+				      u32 pm1b_ctrl);
+
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
@@ -347,7 +350,7 @@
 	unsigned long pfn;
 
 	pfn = pg_off >> PAGE_SHIFT;
-	if (page_is_ram(pfn))
+	if (should_use_kmap(pfn))
 		kunmap(pfn_to_page(pfn));
 	else
 		iounmap(vaddr);
@@ -554,6 +557,15 @@
 	return AE_OK;
 }
 
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+				acpi_physical_address * new_address,
+				u32 *new_table_length)
+{
+	return AE_SUPPORT;
+}
+
 static irqreturn_t acpi_irq(int irq, void *dev_id)
 {
 	u32 handled;
@@ -595,7 +607,8 @@
 
 	acpi_irq_handler = handler;
 	acpi_irq_context = context;
-	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+	if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi",
+				 acpi_irq)) {
 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
 		acpi_irq_handler = NULL;
 		return AE_NOT_ACQUIRED;
@@ -699,49 +712,6 @@
 
 EXPORT_SYMBOL(acpi_os_write_port);
 
-acpi_status
-acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
-{
-	void __iomem *virt_addr;
-	unsigned int size = width / 8;
-	bool unmap = false;
-	u32 dummy;
-
-	rcu_read_lock();
-	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	if (!virt_addr) {
-		rcu_read_unlock();
-		virt_addr = acpi_os_ioremap(phys_addr, size);
-		if (!virt_addr)
-			return AE_BAD_ADDRESS;
-		unmap = true;
-	}
-
-	if (!value)
-		value = &dummy;
-
-	switch (width) {
-	case 8:
-		*(u8 *) value = readb(virt_addr);
-		break;
-	case 16:
-		*(u16 *) value = readw(virt_addr);
-		break;
-	case 32:
-		*(u32 *) value = readl(virt_addr);
-		break;
-	default:
-		BUG();
-	}
-
-	if (unmap)
-		iounmap(virt_addr);
-	else
-		rcu_read_unlock();
-
-	return AE_OK;
-}
-
 #ifdef readq
 static inline u64 read64(const volatile void __iomem *addr)
 {
@@ -758,7 +728,7 @@
 #endif
 
 acpi_status
-acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
+acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
 {
 	void __iomem *virt_addr;
 	unsigned int size = width / 8;
@@ -803,45 +773,6 @@
 	return AE_OK;
 }
 
-acpi_status
-acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
-{
-	void __iomem *virt_addr;
-	unsigned int size = width / 8;
-	bool unmap = false;
-
-	rcu_read_lock();
-	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	if (!virt_addr) {
-		rcu_read_unlock();
-		virt_addr = acpi_os_ioremap(phys_addr, size);
-		if (!virt_addr)
-			return AE_BAD_ADDRESS;
-		unmap = true;
-	}
-
-	switch (width) {
-	case 8:
-		writeb(value, virt_addr);
-		break;
-	case 16:
-		writew(value, virt_addr);
-		break;
-	case 32:
-		writel(value, virt_addr);
-		break;
-	default:
-		BUG();
-	}
-
-	if (unmap)
-		iounmap(virt_addr);
-	else
-		rcu_read_unlock();
-
-	return AE_OK;
-}
-
 #ifdef writeq
 static inline void write64(u64 val, volatile void __iomem *addr)
 {
@@ -856,7 +787,7 @@
 #endif
 
 acpi_status
-acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width)
+acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
 {
 	void __iomem *virt_addr;
 	unsigned int size = width / 8;
@@ -1641,3 +1572,24 @@
 
 	return AE_OK;
 }
+
+acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
+				  u32 pm1b_control)
+{
+	int rc = 0;
+	if (__acpi_os_prepare_sleep)
+		rc = __acpi_os_prepare_sleep(sleep_state,
+					     pm1a_control, pm1b_control);
+	if (rc < 0)
+		return AE_ERROR;
+	else if (rc > 0)
+		return AE_CTRL_SKIP;
+
+	return AE_OK;
+}
+
+void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
+			       u32 pm1a_ctrl, u32 pm1b_ctrl))
+{
+	__acpi_os_prepare_sleep = func;
+}
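The new acpi_os_set_prepare_sleep()/acpi_os_prepare_sleep() pair gives platform code a hook just before the sleep registers are written. A hedged sketch of a hypothetical callback (the example_* names are invented, and the declaration is assumed to be exposed via <linux/acpi.h>); only the return-value convention comes from the wrapper above: negative aborts the transition, positive skips the native register writes via AE_CTRL_SKIP, zero continues normally.

#include <linux/acpi.h>

static int example_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)
{
	if (sleep_state == ACPI_STATE_S3)
		return 1;	/* suppose a hypervisor performs the entry itself */
	return 0;		/* otherwise take the normal ACPICA path */
}

static void example_register_hook(void)
{
	acpi_os_set_prepare_sleep(example_prepare_sleep);
}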
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 9ac2a9f..7049a7d 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -40,9 +40,11 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include "sleep.h"
+#include "internal.h"
 
 #define PREFIX "ACPI: "
 
@@ -77,6 +79,20 @@
 		},
 };
 
+/*
+ * A power managed device.
+ * A device may rely on multiple power resources.
+ */
+struct acpi_power_managed_device {
+	struct device *dev; /* The physical device */
+	acpi_handle *handle;
+};
+
+struct acpi_power_resource_device {
+	struct acpi_power_managed_device *device;
+	struct acpi_power_resource_device *next;
+};
+
 struct acpi_power_resource {
 	struct acpi_device * device;
 	acpi_bus_id name;
@@ -84,6 +100,9 @@
 	u32 order;
 	unsigned int ref_count;
 	struct mutex resource_lock;
+
+	/* List of devices relying on this power resource */
+	struct acpi_power_resource_device *devices;
 };
 
 static struct list_head acpi_power_resource_list;
@@ -183,8 +202,26 @@
 	return 0;
 }
 
+/* Resume the device when all power resources in _PR0 are on */
+static void acpi_power_on_device(struct acpi_power_managed_device *device)
+{
+	struct acpi_device *acpi_dev;
+	acpi_handle handle = device->handle;
+	int state;
+
+	if (acpi_bus_get_device(handle, &acpi_dev))
+		return;
+
+	if (acpi_power_get_inferred_state(acpi_dev, &state))
+		return;
+
+	if (state == ACPI_STATE_D0 && pm_runtime_suspended(device->dev))
+		pm_request_resume(device->dev);
+}
+
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
+	struct acpi_power_resource_device *device_list = resource->devices;
 	acpi_status status = AE_OK;
 
 	status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
@@ -197,6 +234,12 @@
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
 			  resource->name));
 
+	while (device_list) {
+		acpi_power_on_device(device_list->device);
+
+		device_list = device_list->next;
+	}
+
 	return 0;
 }
 
@@ -299,6 +342,125 @@
 	return result;
 }
 
+static void __acpi_power_resource_unregister_device(struct device *dev,
+		acpi_handle res_handle)
+{
+	struct acpi_power_resource *resource = NULL;
+	struct acpi_power_resource_device *prev, *curr;
+
+	if (acpi_power_get_context(res_handle, &resource))
+		return;
+
+	mutex_lock(&resource->resource_lock);
+	prev = NULL;
+	curr = resource->devices;
+	while (curr) {
+		if (curr->device->dev == dev) {
+			if (!prev)
+				resource->devices = curr->next;
+			else
+				prev->next = curr->next;
+
+			kfree(curr);
+			break;
+		}
+
+		prev = curr;
+		curr = curr->next;
+	}
+	mutex_unlock(&resource->resource_lock);
+}
+
+/* Unlink dev from all power resources in _PR0 */
+void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle)
+{
+	struct acpi_device *acpi_dev;
+	struct acpi_handle_list *list;
+	int i;
+
+	if (!dev || !handle)
+		return;
+
+	if (acpi_bus_get_device(handle, &acpi_dev))
+		return;
+
+	list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+
+	for (i = 0; i < list->count; i++)
+		__acpi_power_resource_unregister_device(dev,
+			list->handles[i]);
+}
+
+static int __acpi_power_resource_register_device(
+	struct acpi_power_managed_device *powered_device, acpi_handle handle)
+{
+	struct acpi_power_resource *resource = NULL;
+	struct acpi_power_resource_device *power_resource_device;
+	int result;
+
+	result = acpi_power_get_context(handle, &resource);
+	if (result)
+		return result;
+
+	power_resource_device = kzalloc(
+		sizeof(*power_resource_device), GFP_KERNEL);
+	if (!power_resource_device)
+		return -ENOMEM;
+
+	power_resource_device->device = powered_device;
+
+	mutex_lock(&resource->resource_lock);
+	power_resource_device->next = resource->devices;
+	resource->devices = power_resource_device;
+	mutex_unlock(&resource->resource_lock);
+
+	return 0;
+}
+
+/* Link dev to all power resources in _PR0 */
+int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
+{
+	struct acpi_device *acpi_dev;
+	struct acpi_handle_list *list;
+	struct acpi_power_managed_device *powered_device;
+	int i, ret;
+
+	if (!dev || !handle)
+		return -ENODEV;
+
+	ret = acpi_bus_get_device(handle, &acpi_dev);
+	if (ret)
+		goto no_power_resource;
+
+	if (!acpi_dev->power.flags.power_resources)
+		goto no_power_resource;
+
+	powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
+	if (!powered_device)
+		return -ENOMEM;
+
+	powered_device->dev = dev;
+	powered_device->handle = handle;
+
+	list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+
+	for (i = 0; i < list->count; i++) {
+		ret = __acpi_power_resource_register_device(powered_device,
+			list->handles[i]);
+
+		if (ret) {
+			acpi_power_resource_unregister_device(dev, handle);
+			break;
+		}
+	}
+
+	return ret;
+
+no_power_resource:
+	printk(KERN_WARNING PREFIX "Invalid Power Resource to register!\n");
+	return -ENODEV;
+}
+
 /**
  * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
  *                          ACPI 3.0) _PSW (Power State Wake)
@@ -500,14 +662,14 @@
 {
 	int result;
 
-	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
 		return -EINVAL;
 
 	if (device->power.state == state)
 		return 0;
 
 	if ((device->power.state < ACPI_STATE_D0)
-	    || (device->power.state > ACPI_STATE_D3))
+	    || (device->power.state > ACPI_STATE_D3_COLD))
 		return -ENODEV;
 
 	/* TBD: Resources must be ordered. */
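
The acpi_power_resource_register_device()/acpi_power_resource_unregister_device()
pair added above is a new internal API and no caller appears in this diff. A
minimal sketch of how ACPI glue code might use it; the example_* function names
are hypothetical, only the two acpi_power_resource_* calls come from this patch:

	#include <linux/device.h>
	#include <linux/acpi.h>

	/* Link a physical device to its _PR0 power resources so that it gets a
	 * runtime-resume request once all of those resources are turned on. */
	static int example_bind_power_resources(struct device *dev, acpi_handle handle)
	{
		return acpi_power_resource_register_device(dev, handle);
	}

	static void example_unbind_power_resources(struct device *dev, acpi_handle handle)
	{
		/* Drop the links again before the device goes away. */
		acpi_power_resource_unregister_device(dev, handle);
	}
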
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d4d9cb7..0734086 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -67,6 +67,7 @@
 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
 #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
 #define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82
+#define ACPI_PROCESSOR_DEVICE_HID	"ACPI0007"
 
 #define ACPI_PROCESSOR_LIMIT_USER	0
 #define ACPI_PROCESSOR_LIMIT_THERMAL	1
@@ -87,7 +88,7 @@
 
 static const struct acpi_device_id processor_device_ids[] = {
 	{ACPI_PROCESSOR_OBJECT_HID, 0},
-	{"ACPI0007", 0},
+	{ACPI_PROCESSOR_DEVICE_HID, 0},
 	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
@@ -535,8 +536,8 @@
 		return -ENOMEM;
 
 	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
-		kfree(pr);
-		return -ENOMEM;
+		result = -ENOMEM;
+		goto err_free_pr;
 	}
 
 	pr->handle = device->handle;
@@ -576,7 +577,7 @@
 	dev = get_cpu_device(pr->id);
 	if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
 		result = -EFAULT;
-		goto err_free_cpumask;
+		goto err_clear_processor;
 	}
 
 	/*
@@ -594,9 +595,15 @@
 
 err_remove_sysfs:
 	sysfs_remove_link(&device->dev.kobj, "sysdev");
+err_clear_processor:
+	/*
+	 * processor_device_array is not cleared to allow checks for a buggy BIOS
+	 */
+	per_cpu(processors, pr->id) = NULL;
 err_free_cpumask:
 	free_cpumask_var(pr->throttling.shared_cpu_map);
-
+err_free_pr:
+	kfree(pr);
 	return result;
 }
 
@@ -741,20 +748,46 @@
 	return;
 }
 
+static acpi_status is_processor_device(acpi_handle handle)
+{
+	struct acpi_device_info *info;
+	char *hid;
+	acpi_status status;
+
+	status = acpi_get_object_info(handle, &info);
+	if (ACPI_FAILURE(status))
+		return status;
+
+	if (info->type == ACPI_TYPE_PROCESSOR) {
+		kfree(info);
+		return AE_OK;	/* found a processor object */
+	}
+
+	if (!(info->valid & ACPI_VALID_HID)) {
+		kfree(info);
+		return AE_ERROR;
+	}
+
+	hid = info->hardware_id.string;
+	if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
+		kfree(info);
+		return AE_ERROR;
+	}
+
+	kfree(info);
+	return AE_OK;	/* found a processor device object */
+}
+
 static acpi_status
 processor_walk_namespace_cb(acpi_handle handle,
 			    u32 lvl, void *context, void **rv)
 {
 	acpi_status status;
 	int *action = context;
-	acpi_object_type type = 0;
 
-	status = acpi_get_type(handle, &type);
+	status = is_processor_device(handle);
 	if (ACPI_FAILURE(status))
-		return (AE_OK);
-
-	if (type != ACPI_TYPE_PROCESSOR)
-		return (AE_OK);
+		return AE_OK;	/* not a processor; continue to walk */
 
 	switch (*action) {
 	case INSTALL_NOTIFY_HANDLER:
@@ -772,7 +805,8 @@
 		break;
 	}
 
-	return (AE_OK);
+	/* found a processor; skip walking underneath */
+	return AE_CTRL_DEPTH;
 }
 
 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
@@ -830,7 +864,7 @@
 {
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 	int action = INSTALL_NOTIFY_HANDLER;
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+	acpi_walk_namespace(ACPI_TYPE_ANY,
 			    ACPI_ROOT_OBJECT,
 			    ACPI_UINT32_MAX,
 			    processor_walk_namespace_cb, NULL, &action, NULL);
@@ -843,7 +877,7 @@
 {
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 	int action = UNINSTALL_NOTIFY_HANDLER;
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+	acpi_walk_namespace(ACPI_TYPE_ANY,
 			    ACPI_ROOT_OBJECT,
 			    ACPI_UINT32_MAX,
 			    processor_walk_namespace_cb, NULL, &action, NULL);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0e8e2de..f3decb3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -770,6 +770,35 @@
 	return index;
 }
 
+
+/**
+ * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
+ * @dev: the target CPU
+ * @index: the index of suggested state
+ */
+static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+{
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+
+	ACPI_FLUSH_CPU_CACHE();
+
+	while (1) {
+
+		if (cx->entry_method == ACPI_CSTATE_HALT)
+			safe_halt();
+		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
+			inb(cx->address);
+			/* See comment in acpi_idle_do_entry() */
+			inl(acpi_gbl_FADT.xpm_timer_block.address);
+		} else
+			return -ENODEV;
+	}
+
+	/* Never reached */
+	return 0;
+}
+
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
@@ -1077,12 +1106,14 @@
 				state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
 			state->enter = acpi_idle_enter_c1;
+			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
 			break;
 
 			case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
+			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
 			break;
 
@@ -1159,8 +1190,7 @@
 	 * to make the code that updates C-States be called once.
 	 */
 
-	if (smp_processor_id() == 0 &&
-			cpuidle_get_driver() == &acpi_idle_driver) {
+	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
 
 		cpuidle_pause_and_lock();
 		/* Protect against cpu-hotplug */
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 3b599ab..641b545 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -57,6 +57,27 @@
 static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
 static unsigned int acpi_thermal_cpufreq_is_init = 0;
 
+#define reduction_pctg(cpu) \
+	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+
+/*
+ * Emulate "per package data" using per cpu data (which should really be
+ * provided elsewhere)
+ *
+ * Note that we can lose a CPU on CPU hot-unplug; in that case we forget the
+ * state temporarily. Fortunately that's not a big issue here (I hope).
+ */
+static int phys_package_first_cpu(int cpu)
+{
+	int i;
+	int id = topology_physical_package_id(cpu);
+
+	for_each_online_cpu(i)
+		if (topology_physical_package_id(i) == id)
+			return i;
+	return 0;
+}
+
 static int cpu_has_cpufreq(unsigned int cpu)
 {
 	struct cpufreq_policy policy;
@@ -76,7 +97,7 @@
 
 	max_freq = (
 	    policy->cpuinfo.max_freq *
-	    (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+	    (100 - reduction_pctg(policy->cpu) * 20)
 	) / 100;
 
 	cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -102,16 +123,28 @@
 	if (!cpu_has_cpufreq(cpu))
 		return 0;
 
-	return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+	return reduction_pctg(cpu);
 }
 
 static int cpufreq_set_cur_state(unsigned int cpu, int state)
 {
+	int i;
+
 	if (!cpu_has_cpufreq(cpu))
 		return 0;
 
-	per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
-	cpufreq_update_policy(cpu);
+	reduction_pctg(cpu) = state;
+
+	/*
+	 * Update all the CPUs in the same package because they all
+	 * contribute to the temperature and often share the same
+	 * frequency.
+	 */
+	for_each_online_cpu(i) {
+		if (topology_physical_package_id(i) ==
+		    topology_physical_package_id(cpu))
+			cpufreq_update_policy(i);
+	}
 	return 0;
 }
 
@@ -119,10 +152,6 @@
 {
 	int i;
 
-	for (i = 0; i < nr_cpu_ids; i++)
-		if (cpu_present(i))
-			per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
-
 	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
 				      CPUFREQ_POLICY_NOTIFIER);
 	if (!i)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 605a295..1d02b7b 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -769,7 +769,7 @@
 					u64 *value)
 {
 	u32 bit_width, bit_offset;
-	u64 ptc_value;
+	u32 ptc_value;
 	u64 ptc_mask;
 	struct acpi_processor_throttling *throttling;
 	int ret = -1;
@@ -777,12 +777,11 @@
 	throttling = &pr->throttling;
 	switch (throttling->status_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
-		ptc_value = 0;
 		bit_width = throttling->status_register.bit_width;
 		bit_offset = throttling->status_register.bit_offset;
 
 		acpi_os_read_port((acpi_io_address) throttling->status_register.
-				  address, (u32 *) &ptc_value,
+				  address, &ptc_value,
 				  (u32) (bit_width + bit_offset));
 		ptc_mask = (1 << bit_width) - 1;
 		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index a6c77e8b..c1d6124 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -23,8 +23,7 @@
 	/* Is the reset register supported? The spec says we should be
 	 * checking the bit width and bit offset, but Windows ignores
 	 * these fields */
-	if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
-		return;
+	/* Also ignore acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */
 
 	reset_value = acpi_gbl_FADT.reset_value;
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8ab80ba..767e2dc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -880,18 +880,22 @@
 			int j;
 
 			device->power.flags.power_resources = 1;
-			ps->flags.valid = 1;
 			for (j = 0; j < ps->resources.count; j++)
 				acpi_bus_add_power_resource(ps->resources.handles[j]);
 		}
 
+		/* The existence of _PR3 indicates D3Cold support */
+		if (i == ACPI_STATE_D3) {
+			status = acpi_get_handle(device->handle, object_name, &handle);
+			if (ACPI_SUCCESS(status))
+				device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+		}
+
 		/* Evaluate "_PSx" to see if we can do explicit sets */
 		object_name[2] = 'S';
 		status = acpi_get_handle(device->handle, object_name, &handle);
-		if (ACPI_SUCCESS(status)) {
+		if (ACPI_SUCCESS(status))
 			ps->flags.explicit_set = 1;
-			ps->flags.valid = 1;
-		}
 
 		/* State is valid if we have some power control */
 		if (ps->resources.count || ps->flags.explicit_set)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ca191ff..1d661b5 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -17,6 +17,8 @@
 #include <linux/suspend.h>
 #include <linux/reboot.h>
 #include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
 
 #include <asm/io.h>
 
@@ -26,6 +28,24 @@
 #include "internal.h"
 #include "sleep.h"
 
+static unsigned int gts, bfs;
+module_param(gts, uint, 0644);
+module_param(bfs, uint, 0644);
+MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
+MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
+
+static u8 wake_sleep_flags(void)
+{
+	u8 flags = ACPI_NO_OPTIONAL_METHODS;
+
+	if (gts)
+		flags |= ACPI_EXECUTE_GTS;
+	if (bfs)
+		flags |= ACPI_EXECUTE_BFS;
+
+	return flags;
+}
+
 static u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -243,6 +263,7 @@
 {
 	acpi_status status = AE_OK;
 	u32 acpi_state = acpi_target_sleep_state;
+	u8 flags = wake_sleep_flags();
 	int error;
 
 	ACPI_FLUSH_CPU_CACHE();
@@ -250,7 +271,7 @@
 	switch (acpi_state) {
 	case ACPI_STATE_S1:
 		barrier();
-		status = acpi_enter_sleep_state(acpi_state);
+		status = acpi_enter_sleep_state(acpi_state, flags);
 		break;
 
 	case ACPI_STATE_S3:
@@ -265,7 +286,7 @@
 	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
 
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(acpi_state);
+	acpi_leave_sleep_state_prep(acpi_state, flags);
 
 	/* ACPI 3.0 specs (P62) says that it's the responsibility
 	 * of the OSPM to clear the status bit [ implying that the
@@ -529,27 +550,30 @@
 
 static int acpi_hibernation_enter(void)
 {
+	u8 flags = wake_sleep_flags();
 	acpi_status status = AE_OK;
 
 	ACPI_FLUSH_CPU_CACHE();
 
 	/* This shouldn't return.  If it returns, we have a problem */
-	status = acpi_enter_sleep_state(ACPI_STATE_S4);
+	status = acpi_enter_sleep_state(ACPI_STATE_S4, flags);
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
 
 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 }
 
 static void acpi_hibernation_leave(void)
 {
+	u8 flags = wake_sleep_flags();
+
 	/*
 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
 	 * enable it here.
 	 */
 	acpi_enable();
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
 	/* Check the hardware signature */
 	if (facs && s4_hardware_signature != facs->hardware_signature) {
 		printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -730,6 +754,40 @@
 
 #ifdef CONFIG_PM_SLEEP
 /**
+ * acpi_pm_device_run_wake - Enable/disable wake-up for given device.
+ * @phys_dev: Device for which to enable/disable the platform wake-up capability.
+ * @enable: Whether to enable or disable the wake-up functionality.
+ *
+ * Find the ACPI device object corresponding to @phys_dev and try to
+ * enable/disable the GPE associated with it.
+ */
+int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
+{
+	struct acpi_device *dev;
+	acpi_handle handle;
+
+	if (!device_run_wake(phys_dev))
+		return -EINVAL;
+
+	handle = DEVICE_ACPI_HANDLE(phys_dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
+		dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	if (enable) {
+		acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
+		acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
+	} else {
+		acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
+		acpi_disable_wakeup_device_power(dev);
+	}
+
+	return 0;
+}
+
+/**
  *	acpi_pm_device_sleep_wake - enable or disable the system wake-up
  *                                  capability of given device
  *	@dev: device to handle
@@ -770,10 +828,12 @@
 
 static void acpi_power_off(void)
 {
+	u8 flags = wake_sleep_flags();
+
 	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
 	printk(KERN_DEBUG "%s called\n", __func__);
 	local_irq_disable();
-	acpi_enter_sleep_state(ACPI_STATE_S5);
+	acpi_enter_sleep_state(ACPI_STATE_S5, flags);
 }
 
 /*
@@ -788,13 +848,13 @@
 {
 	acpi_handle dummy;
 
-	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__GTS, &dummy)))
+	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
 	{
 		printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
 		printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
 			"please notify linux-acpi@vger.kernel.org\n");
 	}
-	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__BFS, &dummy)))
+	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
 	{
 		printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
 		printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 48fbc64..7dbebea 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -941,13 +941,13 @@
 	if (!tz)
 		return -EINVAL;
 
-	/* Get temperature [_TMP] (required) */
-	result = acpi_thermal_get_temperature(tz);
+	/* Get trip points [_CRT, _PSV, etc.] (required) */
+	result = acpi_thermal_get_trip_points(tz);
 	if (result)
 		return result;
 
-	/* Get trip points [_CRT, _PSV, etc.] (required) */
-	result = acpi_thermal_get_trip_points(tz);
+	/* Get temperature [_TMP] (required) */
+	result = acpi_thermal_get_temperature(tz);
 	if (result)
 		return result;
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index eaef02a..9577b6f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -548,27 +548,27 @@
  *		1. 	The system BIOS should NOT automatically control the brightness 
  *			level of the LCD when the power changes from AC to DC.
  * Return Value:
- * 		-1	wrong arg.
+ *		-EINVAL	wrong arg.
  */
 
 static int
 acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
 {
-	u64 status = 0;
+	acpi_status status;
 	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
 	struct acpi_object_list args = { 1, &arg0 };
 
 
-	if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) {
-		status = -1;
-		goto Failed;
-	}
+	if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
+		return -EINVAL;
 	arg0.integer.value = (lcd_flag << 2) | bios_flag;
 	video->dos_setting = arg0.integer.value;
-	acpi_evaluate_object(video->device->handle, "_DOS", &args, NULL);
+	status = acpi_evaluate_object(video->device->handle, "_DOS",
+		&args, NULL);
+	if (ACPI_FAILURE(status))
+		return -EIO;
 
-      Failed:
-	return status;
+	return 0;
 }
 
 /*
@@ -1343,15 +1343,17 @@
 acpi_video_bus_get_devices(struct acpi_video_bus *video,
 			   struct acpi_device *device)
 {
-	int status = 0;
+	int status;
 	struct acpi_device *dev;
 
-	acpi_video_device_enumerate(video);
+	status = acpi_video_device_enumerate(video);
+	if (status)
+		return status;
 
 	list_for_each_entry(dev, &device->children, node) {
 
 		status = acpi_video_bus_get_one_device(dev, video);
-		if (ACPI_FAILURE(status)) {
+		if (status) {
 			printk(KERN_WARNING PREFIX
 					"Can't attach device\n");
 			continue;
@@ -1653,15 +1655,20 @@
 	mutex_init(&video->device_list_lock);
 	INIT_LIST_HEAD(&video->video_device_list);
 
-	acpi_video_bus_get_devices(video, device);
-	acpi_video_bus_start_devices(video);
+	error = acpi_video_bus_get_devices(video, device);
+	if (error)
+		goto err_free_video;
 
 	video->input = input = input_allocate_device();
 	if (!input) {
 		error = -ENOMEM;
-		goto err_stop_video;
+		goto err_put_video;
 	}
 
+	error = acpi_video_bus_start_devices(video);
+	if (error)
+		goto err_free_input_dev;
+
 	snprintf(video->phys, sizeof(video->phys),
 		"%s/video/input0", acpi_device_hid(video->device));
 
@@ -1682,7 +1689,7 @@
 
 	error = input_register_device(input);
 	if (error)
-		goto err_free_input_dev;
+		goto err_stop_video;
 
 	printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
 	       ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
@@ -1692,14 +1699,19 @@
 
 	video->pm_nb.notifier_call = acpi_video_resume;
 	video->pm_nb.priority = 0;
-	register_pm_notifier(&video->pm_nb);
+	error = register_pm_notifier(&video->pm_nb);
+	if (error)
+		goto err_unregister_input_dev;
 
 	return 0;
 
- err_free_input_dev:
-	input_free_device(input);
+ err_unregister_input_dev:
+	input_unregister_device(input);
  err_stop_video:
 	acpi_video_bus_stop_devices(video);
+ err_free_input_dev:
+	input_free_device(input);
+ err_put_video:
 	acpi_video_bus_put_devices(video);
 	kfree(video->attached_array);
  err_free_video:
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 6c9387d..5401814 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -16,10 +16,11 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/highmem.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #define to_dev(obj) container_of(obj, struct device, kobj)
 
@@ -81,6 +82,11 @@
 
 static int loading_timeout = 60;	/* In seconds */
 
+static inline long firmware_loading_timeout(void)
+{
+	return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
+}
+
 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
  * guarding for corner cases a global lock should be OK */
 static DEFINE_MUTEX(fw_lock);
@@ -440,13 +446,11 @@
 {
 	struct firmware_priv *fw_priv;
 	struct device *f_dev;
-	int error;
 
 	fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
 	if (!fw_priv) {
 		dev_err(device, "%s: kmalloc failed\n", __func__);
-		error = -ENOMEM;
-		goto err_out;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	fw_priv->fw = firmware;
@@ -463,98 +467,80 @@
 	f_dev->parent = device;
 	f_dev->class = &firmware_class;
 
-	dev_set_uevent_suppress(f_dev, true);
-
-	/* Need to pin this module until class device is destroyed */
-	__module_get(THIS_MODULE);
-
-	error = device_add(f_dev);
-	if (error) {
-		dev_err(device, "%s: device_register failed\n", __func__);
-		goto err_put_dev;
-	}
-
-	error = device_create_bin_file(f_dev, &firmware_attr_data);
-	if (error) {
-		dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
-		goto err_del_dev;
-	}
-
-	error = device_create_file(f_dev, &dev_attr_loading);
-	if (error) {
-		dev_err(device, "%s: device_create_file failed\n", __func__);
-		goto err_del_bin_attr;
-	}
-
-	if (uevent)
-		dev_set_uevent_suppress(f_dev, false);
-
 	return fw_priv;
-
-err_del_bin_attr:
-	device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
-	device_del(f_dev);
-err_put_dev:
-	put_device(f_dev);
-err_out:
-	return ERR_PTR(error);
 }
 
-static void fw_destroy_instance(struct firmware_priv *fw_priv)
+static struct firmware_priv *
+_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
+			  struct device *device, bool uevent, bool nowait)
 {
-	struct device *f_dev = &fw_priv->dev;
-
-	device_remove_file(f_dev, &dev_attr_loading);
-	device_remove_bin_file(f_dev, &firmware_attr_data);
-	device_unregister(f_dev);
-}
-
-static int _request_firmware(const struct firmware **firmware_p,
-			     const char *name, struct device *device,
-			     bool uevent, bool nowait)
-{
-	struct firmware_priv *fw_priv;
 	struct firmware *firmware;
-	int retval = 0;
+	struct firmware_priv *fw_priv;
 
 	if (!firmware_p)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
 		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
 			__func__);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	if (fw_get_builtin_firmware(firmware, name)) {
 		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
-		return 0;
+		return NULL;
 	}
 
-	read_lock_usermodehelper();
-
-	if (WARN_ON(usermodehelper_is_disabled())) {
-		dev_err(device, "firmware: %s will not be loaded\n", name);
-		retval = -EBUSY;
-		goto out;
-	}
-
-	if (uevent)
-		dev_dbg(device, "firmware: requesting %s\n", name);
-
 	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
 	if (IS_ERR(fw_priv)) {
-		retval = PTR_ERR(fw_priv);
-		goto out;
+		release_firmware(firmware);
+		*firmware_p = NULL;
+	}
+	return fw_priv;
+}
+
+static void _request_firmware_cleanup(const struct firmware **firmware_p)
+{
+	release_firmware(*firmware_p);
+	*firmware_p = NULL;
+}
+
+static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
+				  long timeout)
+{
+	int retval = 0;
+	struct device *f_dev = &fw_priv->dev;
+
+	dev_set_uevent_suppress(f_dev, true);
+
+	/* Need to pin this module until class device is destroyed */
+	__module_get(THIS_MODULE);
+
+	retval = device_add(f_dev);
+	if (retval) {
+		dev_err(f_dev, "%s: device_register failed\n", __func__);
+		goto err_put_dev;
+	}
+
+	retval = device_create_bin_file(f_dev, &firmware_attr_data);
+	if (retval) {
+		dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
+		goto err_del_dev;
+	}
+
+	retval = device_create_file(f_dev, &dev_attr_loading);
+	if (retval) {
+		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
+		goto err_del_bin_attr;
 	}
 
 	if (uevent) {
-		if (loading_timeout > 0)
+		dev_set_uevent_suppress(f_dev, false);
+		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
+		if (timeout != MAX_SCHEDULE_TIMEOUT)
 			mod_timer(&fw_priv->timeout,
-				  round_jiffies_up(jiffies +
-						   loading_timeout * HZ));
+				  round_jiffies_up(jiffies + timeout));
 
 		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
 	}
@@ -570,16 +556,13 @@
 	fw_priv->fw = NULL;
 	mutex_unlock(&fw_lock);
 
-	fw_destroy_instance(fw_priv);
-
-out:
-	read_unlock_usermodehelper();
-
-	if (retval) {
-		release_firmware(firmware);
-		*firmware_p = NULL;
-	}
-
+	device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+	device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+	device_del(f_dev);
+err_put_dev:
+	put_device(f_dev);
 	return retval;
 }
 
@@ -602,7 +585,26 @@
 request_firmware(const struct firmware **firmware_p, const char *name,
                  struct device *device)
 {
-        return _request_firmware(firmware_p, name, device, true, false);
+	struct firmware_priv *fw_priv;
+	int ret;
+
+	fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
+					    false);
+	if (IS_ERR_OR_NULL(fw_priv))
+		return PTR_RET(fw_priv);
+
+	ret = usermodehelper_read_trylock();
+	if (WARN_ON(ret)) {
+		dev_err(device, "firmware: %s will not be loaded\n", name);
+	} else {
+		ret = _request_firmware_load(fw_priv, true,
+					firmware_loading_timeout());
+		usermodehelper_read_unlock();
+	}
+	if (ret)
+		_request_firmware_cleanup(firmware_p);
+
+	return ret;
 }
 
 /**
@@ -629,25 +631,39 @@
 	bool uevent;
 };
 
-static int request_firmware_work_func(void *arg)
+static void request_firmware_work_func(struct work_struct *work)
 {
-	struct firmware_work *fw_work = arg;
+	struct firmware_work *fw_work;
 	const struct firmware *fw;
+	struct firmware_priv *fw_priv;
+	long timeout;
 	int ret;
 
-	if (!arg) {
-		WARN_ON(1);
-		return 0;
+	fw_work = container_of(work, struct firmware_work, work);
+	fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
+			fw_work->uevent, true);
+	if (IS_ERR_OR_NULL(fw_priv)) {
+		ret = PTR_RET(fw_priv);
+		goto out;
 	}
 
-	ret = _request_firmware(&fw, fw_work->name, fw_work->device,
-				fw_work->uevent, true);
+	timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
+	if (timeout) {
+		ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
+		usermodehelper_read_unlock();
+	} else {
+		dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
+			fw_work->name);
+		ret = -EAGAIN;
+	}
+	if (ret)
+		_request_firmware_cleanup(&fw);
+
+ out:
 	fw_work->cont(fw, fw_work->context);
 
 	module_put(fw_work->module);
 	kfree(fw_work);
-
-	return ret;
 }
 
 /**
@@ -673,7 +689,6 @@
 	const char *name, struct device *device, gfp_t gfp, void *context,
 	void (*cont)(const struct firmware *fw, void *context))
 {
-	struct task_struct *task;
 	struct firmware_work *fw_work;
 
 	fw_work = kzalloc(sizeof (struct firmware_work), gfp);
@@ -692,15 +707,8 @@
 		return -EFAULT;
 	}
 
-	task = kthread_run(request_firmware_work_func, fw_work,
-			    "firmware/%s", name);
-	if (IS_ERR(task)) {
-		fw_work->cont(NULL, fw_work->context);
-		module_put(fw_work->module);
-		kfree(fw_work);
-		return PTR_ERR(task);
-	}
-
+	INIT_WORK(&fw_work->work, request_firmware_work_func);
+	schedule_work(&fw_work->work);
 	return 0;
 }
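
The refactoring above only reorganizes the internals (prepare/load/cleanup
plus a workqueue for the nowait path); the driver-facing API is unchanged.
For reference, a minimal synchronous use of it looks like this (the firmware
name and the example_* function are illustrative):

	#include <linux/firmware.h>

	static int example_load_firmware(struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		/* May block while the sysfs loader or a built-in image provides the data. */
		ret = request_firmware(&fw, "example/fw.bin", dev);
		if (ret)
			return ret;

		/* ... push fw->data (fw->size bytes) to the hardware ... */

		release_firmware(fw);
		return 0;
	}
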
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 541f821..bd0f394 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -532,6 +532,8 @@
 	dev->power.suspend_time = ktime_set(0, 0);
 	dev->power.max_time_suspended_ns = -1;
 	dev->power.deferred_resume = false;
+	wake_up_all(&dev->power.wait_queue);
+
 	if (retval == -EAGAIN || retval == -EBUSY) {
 		dev->power.runtime_error = 0;
 
@@ -547,7 +549,6 @@
 	} else {
 		pm_runtime_cancel_pending(dev);
 	}
-	wake_up_all(&dev->power.wait_queue);
 	goto out;
 }
 
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5157fa0..92b779e 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -138,6 +138,7 @@
 	unsigned int base, top;
 	int nodes = 0;
 	int registers = 0;
+	int average;
 
 	mutex_lock(&map->lock);
 
@@ -152,8 +153,13 @@
 		registers += top - base + 1;
 	}
 
+	if (nodes)
+		average = registers / nodes;
+	else
+		average = 0;
+
 	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
-		   nodes, registers, registers / nodes);
+		   nodes, registers, average);
 
 	mutex_unlock(&map->lock);
 
@@ -396,7 +402,7 @@
 							   map->cache_word_size);
 
 			/* Is this the hardware default?  If so skip. */
-			ret = regcache_lookup_reg(map, i);
+			ret = regcache_lookup_reg(map, regtmp);
 			if (ret >= 0 && val == map->reg_defaults[ret].def)
 				continue;
 
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 87f54db..74b6909 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -346,6 +346,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(regcache_sync_region);
 
 /**
  * regcache_cache_only: Put a register map into cache only mode
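
regcache_sync_region() itself is not new; the hunk above only exports it to
modules. A hedged usage sketch (the register range and the example_* wrapper
are made up):

	/* Write back any dirty cached values for registers 0x10..0x1f only,
	 * e.g. after powering one block of the chip back up. */
	static int example_resync_block(struct regmap *map)
	{
		return regcache_sync_region(map, 0x10, 0x1f);
	}
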
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 58517a5..251eb70 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -27,12 +27,6 @@
 	return strlen(buf);
 }
 
-static int regmap_open_file(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t regmap_name_read_file(struct file *file,
 				     char __user *user_buf, size_t count,
 				     loff_t *ppos)
@@ -57,7 +51,7 @@
 }
 
 static const struct file_operations regmap_name_fops = {
-	.open = regmap_open_file,
+	.open = simple_open,
 	.read = regmap_name_read_file,
 	.llseek = default_llseek,
 };
@@ -174,7 +168,7 @@
 #endif
 
 static const struct file_operations regmap_map_fops = {
-	.open = regmap_open_file,
+	.open = simple_open,
 	.read = regmap_map_read_file,
 	.write = regmap_map_write_file,
 	.llseek = default_llseek,
@@ -243,7 +237,7 @@
 }
 
 static const struct file_operations regmap_access_fops = {
-	.open = regmap_open_file,
+	.open = simple_open,
 	.read = regmap_access_read_file,
 	.llseek = default_llseek,
 };
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index c1172da..fb7c80f 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -29,7 +29,7 @@
 
 config BCMA_DRIVER_PCI_HOSTMODE
 	bool "Driver for PCI core working in hostmode"
-	depends on BCMA && MIPS
+	depends on BCMA && MIPS && BCMA_HOST_PCI
 	help
 	  PCI core hostmode operation (external PCI bus).
 
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 4e20bcf..d2097a1 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -10,6 +10,7 @@
  */
 
 #include "bcma_private.h"
+#include <linux/pci.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 #include <asm/paccess.h>
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 76a0823..b0b00d7 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1030,37 +1030,6 @@
 	return 0;
 }
 
-static DEFINE_SPINLOCK(floppy_hlt_lock);
-static int hlt_disabled;
-static void floppy_disable_hlt(void)
-{
-	unsigned long flags;
-
-	WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
-	spin_lock_irqsave(&floppy_hlt_lock, flags);
-	if (!hlt_disabled) {
-		hlt_disabled = 1;
-#ifdef HAVE_DISABLE_HLT
-		disable_hlt();
-#endif
-	}
-	spin_unlock_irqrestore(&floppy_hlt_lock, flags);
-}
-
-static void floppy_enable_hlt(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&floppy_hlt_lock, flags);
-	if (hlt_disabled) {
-		hlt_disabled = 0;
-#ifdef HAVE_DISABLE_HLT
-		enable_hlt();
-#endif
-	}
-	spin_unlock_irqrestore(&floppy_hlt_lock, flags);
-}
-
 static void setup_DMA(void)
 {
 	unsigned long f;
@@ -1105,7 +1074,6 @@
 	fd_enable_dma();
 	release_dma_lock(f);
 #endif
-	floppy_disable_hlt();
 }
 
 static void show_floppy(void);
@@ -1707,7 +1675,6 @@
 	fd_disable_dma();
 	release_dma_lock(f);
 
-	floppy_enable_hlt();
 	do_floppy = NULL;
 	if (fdc >= N_FDC || FDCS->address == -1) {
 		/* we don't even know which FDC is the culprit */
@@ -1856,8 +1823,6 @@
 		show_floppy();
 	cancel_activity();
 
-	floppy_enable_hlt();
-
 	flags = claim_dma_lock();
 	fd_disable_dma();
 	release_dma_lock(flags);
@@ -4508,7 +4473,6 @@
 #if N_FDC > 1
 	set_dor(1, ~8, 0);
 #endif
-	floppy_enable_hlt();
 
 	if (floppy_track_buffer && max_buffer_sectors) {
 		tmpsize = max_buffer_sectors * 1024;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d5e1ab9..98cbeba 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1475,7 +1475,7 @@
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (!xen_platform_pci_unplug)
+	if (xen_hvm_domain() && !xen_platform_pci_unplug)
 		return -ENODEV;
 
 	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 48442476..ae9edca 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -72,7 +72,9 @@
 
 	/* Atheros AR3012 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3004) },
+	{ USB_DEVICE(0x0CF3, 0x311D) },
 	{ USB_DEVICE(0x13d3, 0x3375) },
+	{ USB_DEVICE(0x04CA, 0x3005) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -89,7 +91,9 @@
 
 	/* Atheros AR3012 with sflash firmware*/
 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 
 	{ }	/* Terminating entry */
 };
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index 6c20bbb..428dbb7 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -45,12 +45,6 @@
 	struct dentry *txdnldready;
 };
 
-static int btmrvl_open_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t btmrvl_hscfgcmd_write(struct file *file,
 			const char __user *ubuf, size_t count, loff_t *ppos)
 {
@@ -93,7 +87,7 @@
 static const struct file_operations btmrvl_hscfgcmd_fops = {
 	.read	= btmrvl_hscfgcmd_read,
 	.write	= btmrvl_hscfgcmd_write,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -134,7 +128,7 @@
 static const struct file_operations btmrvl_psmode_fops = {
 	.read	= btmrvl_psmode_read,
 	.write	= btmrvl_psmode_write,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -180,7 +174,7 @@
 static const struct file_operations btmrvl_pscmd_fops = {
 	.read = btmrvl_pscmd_read,
 	.write = btmrvl_pscmd_write,
-	.open = btmrvl_open_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -221,7 +215,7 @@
 static const struct file_operations btmrvl_gpiogap_fops = {
 	.read	= btmrvl_gpiogap_read,
 	.write	= btmrvl_gpiogap_write,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -265,7 +259,7 @@
 static const struct file_operations btmrvl_hscmd_fops = {
 	.read	= btmrvl_hscmd_read,
 	.write	= btmrvl_hscmd_write,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -305,7 +299,7 @@
 static const struct file_operations btmrvl_hsmode_fops = {
 	.read	= btmrvl_hsmode_read,
 	.write	= btmrvl_hsmode_write,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -323,7 +317,7 @@
 
 static const struct file_operations btmrvl_curpsmode_fops = {
 	.read	= btmrvl_curpsmode_read,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -341,7 +335,7 @@
 
 static const struct file_operations btmrvl_psstate_fops = {
 	.read	= btmrvl_psstate_read,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -359,7 +353,7 @@
 
 static const struct file_operations btmrvl_hsstate_fops = {
 	.read	= btmrvl_hsstate_read,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
@@ -378,7 +372,7 @@
 
 static const struct file_operations btmrvl_txdnldready_fops = {
 	.read	= btmrvl_txdnldready_read,
-	.open	= btmrvl_open_generic,
+	.open	= simple_open,
 	.llseek = default_llseek,
 };
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 480cad9..3311b81 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -61,7 +61,7 @@
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
 	/* Broadcom SoftSailing reporting vendor specific */
-	{ USB_DEVICE(0x05ac, 0x21e1) },
+	{ USB_DEVICE(0x0a5c, 0x21e1) },
 
 	/* Apple MacBookPro 7,1 */
 	{ USB_DEVICE(0x05ac, 0x8213) },
@@ -103,6 +103,7 @@
 	/* Broadcom BCM20702A0 */
 	{ USB_DEVICE(0x0a5c, 0x21e3) },
 	{ USB_DEVICE(0x0a5c, 0x21e6) },
+	{ USB_DEVICE(0x0a5c, 0x21e8) },
 	{ USB_DEVICE(0x0a5c, 0x21f3) },
 	{ USB_DEVICE(0x413c, 0x8197) },
 
@@ -129,7 +130,9 @@
 
 	/* Atheros 3012 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index fd5adb4..98a8c05 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -299,11 +299,11 @@
 			hci_uart_close(hdev);
 
 		if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
-			hu->proto->close(hu);
 			if (hdev) {
 				hci_unregister_dev(hdev);
 				hci_free_dev(hdev);
 			}
+			hu->proto->close(hu);
 		}
 
 		kfree(hu);
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 5da67f1..7ea18a5 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -234,6 +234,7 @@
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG		0x0166
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB		0x0158  /* Server */
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG		0x015A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG		0x016A
 
 int intel_gmch_probe(struct pci_dev *pdev,
 			       struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 5cf47ac..7f025fb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1190,7 +1190,6 @@
 {
 #ifdef CONFIG_INTEL_IOMMU
 	const unsigned short gpu_devid = intel_private.pcidev->device;
-	extern int intel_iommu_gfx_mapped;
 
 	/* Query intel_iommu to see if we need the workaround. Presumably that
 	 * was loaded first.
@@ -1459,6 +1458,8 @@
 	    "Ivybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
 	    "Ivybridge", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
+	    "Ivybridge", &sandybridge_gtt_driver },
 	{ 0, NULL, NULL }
 };
 
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 57501ca..46118f8 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -301,7 +301,7 @@
 			 * anything critical, chill a bit on each iteration.
 			 */
 			while (wait_event_freezable(apm_suspend_waitqueue,
-					as->suspend_state == SUSPEND_DONE))
+					as->suspend_state != SUSPEND_ACKED))
 				msleep(10);
 			break;
 		case SUSPEND_ACKTO:
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 54ca8b2..4ec04a7 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1260,10 +1260,15 @@
 	uuid = table->data;
 	if (!uuid) {
 		uuid = tmp_uuid;
-		uuid[8] = 0;
-	}
-	if (uuid[8] == 0)
 		generate_random_uuid(uuid);
+	} else {
+		static DEFINE_SPINLOCK(bootid_spinlock);
+
+		spin_lock(&bootid_spinlock);
+		if (!uuid[8])
+			generate_random_uuid(uuid);
+		spin_unlock(&bootid_spinlock);
+	}
 
 	sprintf(buf, "%pU", uuid);
 
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 4dc0194..3b22a60 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -194,17 +194,17 @@
 
 		hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
 				       *f_pos, bytes_this_pass);
-		if (hv_retval > 0) {
-			if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
-				retval = -EFAULT;
-				break;
-			}
-		} else if (hv_retval <= 0) {
+		if (hv_retval <= 0) {
 			if (retval == 0)
 				retval = hv_retval;
 			break;
 		}
 
+		if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
+			retval = -EFAULT;
+			break;
+		}
+
 		retval += hv_retval;
 		*f_pos += hv_retval;
 		buf += hv_retval;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index b58b561..ddf86b6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1038,12 +1038,6 @@
 	.attrs = port_sysfs_entries,
 };
 
-static int debugfs_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
 			    size_t count, loff_t *offp)
 {
@@ -1087,7 +1081,7 @@
 
 static const struct file_operations port_debugfs_ops = {
 	.owner = THIS_MODULE,
-	.open  = debugfs_open,
+	.open  = simple_open,
 	.read  = debugfs_read,
 };
 
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 32d790d..ffbb446 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -51,9 +51,6 @@
 config ARM_EXYNOS_CPUFREQ
 	bool "SAMSUNG EXYNOS SoCs"
 	depends on ARCH_EXYNOS
-	select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210
-	select ARM_EXYNOS4X12_CPUFREQ if (SOC_EXYNOS4212 || SOC_EXYNOS4412)
-	select ARM_EXYNOS5250_CPUFREQ if SOC_EXYNOS5250
 	default y
 	help
 	  This adds the CPUFreq driver common part for Samsung
@@ -62,20 +59,19 @@
 	  If in doubt, say N.
 
 config ARM_EXYNOS4210_CPUFREQ
-	bool "Samsung EXYNOS4210"
-	depends on ARCH_EXYNOS
+	def_bool CPU_EXYNOS4210
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS4210
 	  SoC (S5PV310 or S5PC210).
 
 config ARM_EXYNOS4X12_CPUFREQ
-	bool "Samsung EXYNOS4X12"
+	def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS4X12
 	  SoC (EXYNOS4212 or EXYNOS4412).
 
 config ARM_EXYNOS5250_CPUFREQ
-	bool "Samsung EXYNOS5250"
+	def_bool SOC_EXYNOS5250
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS5250
 	  SoC.
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index a22ffa5..0bf1b89 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -142,7 +142,7 @@
 	policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
 
 	/* policy sharing between dual CPUs */
-	cpumask_copy(policy->cpus, &cpu_present_map);
+	cpumask_copy(policy->cpus, cpu_present_mask);
 
 	policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6588f43..2f0083a 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -53,6 +53,55 @@
 
 static int __cpuidle_register_device(struct cpuidle_device *dev);
 
+static inline int cpuidle_enter(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int index)
+{
+	struct cpuidle_state *target_state = &drv->states[index];
+	return target_state->enter(dev, drv, index);
+}
+
+static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
+			       struct cpuidle_driver *drv, int index)
+{
+	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
+}
+
+typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
+			       struct cpuidle_driver *drv, int index);
+
+static cpuidle_enter_t cpuidle_enter_ops;
+
+/**
+ * cpuidle_play_dead - cpu off-lining
+ *
+ * Only returns in case of an error or if there is no cpuidle driver.
+ */
+int cpuidle_play_dead(void)
+{
+	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_driver();
+	int i, dead_state = -1;
+	int power_usage = -1;
+
+	if (!drv)
+		return -ENODEV;
+
+	/* Find lowest-power state that supports long-term idle */
+	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+		struct cpuidle_state *s = &drv->states[i];
+
+		if (s->power_usage < power_usage && s->enter_dead) {
+			power_usage = s->power_usage;
+			dead_state = i;
+		}
+	}
+
+	if (dead_state != -1)
+		return drv->states[dead_state].enter_dead(dev, dead_state);
+
+	return -ENODEV;
+}
+
 /**
  * cpuidle_idle_call - the main idle loop
  *
@@ -63,7 +112,6 @@
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_driver();
-	struct cpuidle_state *target_state;
 	int next_state, entered_state;
 
 	if (off)
@@ -92,12 +140,10 @@
 		return 0;
 	}
 
-	target_state = &drv->states[next_state];
-
 	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
 	trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
-	entered_state = target_state->enter(dev, drv, next_state);
+	entered_state = cpuidle_enter_ops(dev, drv, next_state);
 
 	trace_power_end_rcuidle(dev->cpu);
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -110,6 +156,8 @@
 		dev->states_usage[entered_state].time +=
 				(unsigned long long)dev->last_residency;
 		dev->states_usage[entered_state].usage++;
+	} else {
+		dev->last_residency = 0;
 	}
 
 	/* give the governor an opportunity to reflect on the outcome */
@@ -164,6 +212,37 @@
 
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
+/**
+ * cpuidle_wrap_enter - perform timekeeping and irq enabling around an enter function
+ * @dev: pointer to a valid cpuidle_device object
+ * @drv: pointer to a valid cpuidle_driver object
+ * @index: index of the target cpuidle state.
+ */
+int cpuidle_wrap_enter(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int index,
+				int (*enter)(struct cpuidle_device *dev,
+					struct cpuidle_driver *drv, int index))
+{
+	ktime_t time_start, time_end;
+	s64 diff;
+
+	time_start = ktime_get();
+
+	index = enter(dev, drv, index);
+
+	time_end = ktime_get();
+
+	local_irq_enable();
+
+	diff = ktime_to_us(ktime_sub(time_end, time_start));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	dev->last_residency = (int) diff;
+
+	return index;
+}
+
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
 static int poll_idle(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
@@ -197,6 +276,7 @@
 	state->power_usage = -1;
 	state->flags = 0;
 	state->enter = poll_idle;
+	state->disable = 0;
 }
 #else
 static void poll_idle_init(struct cpuidle_driver *drv) {}
@@ -212,13 +292,14 @@
 int cpuidle_enable_device(struct cpuidle_device *dev)
 {
 	int ret, i;
+	struct cpuidle_driver *drv = cpuidle_get_driver();
 
 	if (dev->enabled)
 		return 0;
-	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
+	if (!drv || !cpuidle_curr_governor)
 		return -EIO;
 	if (!dev->state_count)
-		return -EINVAL;
+		dev->state_count = drv->state_count;
 
 	if (dev->registered == 0) {
 		ret = __cpuidle_register_device(dev);
@@ -226,13 +307,16 @@
 			return ret;
 	}
 
-	poll_idle_init(cpuidle_get_driver());
+	cpuidle_enter_ops = drv->en_core_tk_irqen ?
+		cpuidle_enter_tk : cpuidle_enter;
+
+	poll_idle_init(drv);
 
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
 
 	if (cpuidle_curr_governor->enable &&
-	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
+	    (ret = cpuidle_curr_governor->enable(drv, dev)))
 		goto fail_sysfs;
 
 	for (i = 0; i < dev->state_count; i++) {
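
cpuidle_play_dead() gives architecture code a generic way to park an offlined
CPU in the deepest state that provides an enter_dead() hook. A hedged sketch
of how an arch-level play-dead path might use it; example_halt_forever() is a
placeholder for the architecture's existing fallback, not part of this patch:

	static void example_play_dead(void)
	{
		/* Prefer a cpuidle state that supports long-term idle... */
		if (cpuidle_play_dead())
			/* ...and fall back to a plain halt loop if none does. */
			example_halt_forever();
	}
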
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 284d7af..40cd3f3 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -47,7 +47,7 @@
  */
 int cpuidle_register_driver(struct cpuidle_driver *drv)
 {
-	if (!drv)
+	if (!drv || !drv->state_count)
 		return -EINVAL;
 
 	if (cpuidle_disabled())
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index ad09526..0633575 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -236,7 +236,7 @@
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
-	unsigned int power_usage = -1;
+	int power_usage = -1;
 	int i;
 	int multiplier;
 	struct timespec t;
@@ -280,7 +280,8 @@
 	 * We want to default to C1 (hlt), not to busy polling
 	 * unless the timer is happening really really soon.
 	 */
-	if (data->expected_us > 5)
+	if (data->expected_us > 5 &&
+		drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
 		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
 	/*
@@ -290,6 +291,8 @@
 	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
 
+		if (s->disable)
+			continue;
 		if (s->target_residency > data->predicted_us)
 			continue;
 		if (s->exit_latency > latency_req)
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 3fe41fe..88032b4 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -11,6 +11,7 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/capability.h>
 
 #include "cpuidle.h"
 
@@ -222,6 +223,9 @@
 #define define_one_state_ro(_name, show) \
 static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
 
+#define define_one_state_rw(_name, show, store) \
+static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
+
 #define define_show_state_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
 			 struct cpuidle_state_usage *state_usage, char *buf) \
@@ -229,6 +233,24 @@
 	return sprintf(buf, "%u\n", state->_name);\
 }
 
+#define define_store_state_function(_name) \
+static ssize_t store_state_##_name(struct cpuidle_state *state, \
+		const char *buf, size_t size) \
+{ \
+	long value; \
+	int err; \
+	if (!capable(CAP_SYS_ADMIN)) \
+		return -EPERM; \
+	err = kstrtol(buf, 0, &value); \
+	if (err) \
+		return err; \
+	if (value) \
+		state->disable = 1; \
+	else \
+		state->disable = 0; \
+	return size; \
+}
+
 #define define_show_state_ull_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
 			struct cpuidle_state_usage *state_usage, char *buf) \
@@ -251,6 +273,8 @@
 define_show_state_ull_function(time)
 define_show_state_str_function(name)
 define_show_state_str_function(desc)
+define_show_state_function(disable)
+define_store_state_function(disable)
 
 define_one_state_ro(name, show_state_name);
 define_one_state_ro(desc, show_state_desc);
@@ -258,6 +282,7 @@
 define_one_state_ro(power, show_state_power_usage);
 define_one_state_ro(usage, show_state_usage);
 define_one_state_ro(time, show_state_time);
+define_one_state_rw(disable, show_state_disable, store_state_disable);
 
 static struct attribute *cpuidle_state_default_attrs[] = {
 	&attr_name.attr,
@@ -266,6 +291,7 @@
 	&attr_power.attr,
 	&attr_usage.attr,
 	&attr_time.attr,
+	&attr_disable.attr,
 	NULL
 };
 
@@ -287,8 +313,22 @@
 	return ret;
 }
 
+static ssize_t cpuidle_state_store(struct kobject *kobj,
+	struct attribute *attr, const char *buf, size_t size)
+{
+	int ret = -EIO;
+	struct cpuidle_state *state = kobj_to_state(kobj);
+	struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
+
+	if (cattr->store)
+		ret = cattr->store(state, buf, size);
+
+	return ret;
+}
+
 static const struct sysfs_ops cpuidle_state_sysfs_ops = {
 	.show = cpuidle_state_show,
+	.store = cpuidle_state_store,
 };
 
 static void cpuidle_state_sysfs_release(struct kobject *kobj)
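
With the store hook above in place, every cpuidle state directory gains a
writable "disable" attribute: writing a non-zero value (root only, per the
CAP_SYS_ADMIN check) makes the menu governor skip that state, and writing 0
re-enables it. The attribute is expected to appear under the usual per-CPU
path, e.g. /sys/devices/system/cpu/cpu0/cpuidle/state1/disable.
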
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index dc89455..750925f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -104,13 +104,6 @@
 static struct coh901318_base *debugfs_dma_base;
 static struct dentry *dma_dentry;
 
-static int coh901318_debugfs_open(struct inode *inode, struct file *file)
-{
-
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static int coh901318_debugfs_read(struct file *file, char __user *buf,
 				  size_t count, loff_t *f_pos)
 {
@@ -158,7 +151,7 @@
 
 static const struct file_operations coh901318_debugfs_status_operations = {
 	.owner		= THIS_MODULE,
-	.open		= coh901318_debugfs_open,
+	.open		= simple_open,
 	.read		= coh901318_debugfs_read,
 	.llseek		= default_llseek,
 };
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc3..2397f6f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
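
net_dma_find_channel() wraps dma_find_channel(DMA_MEMCPY) with the byte
alignment check that the networking receive-offload path relies on. A hedged
sketch of the intended calling pattern (the example_* wrapper and the CPU-copy
fallback are illustrative):

	#include <linux/string.h>
	#include <linux/dmaengine.h>

	static void example_copy(void *dst, const void *src, size_t len)
	{
		struct dma_chan *chan = net_dma_find_channel();

		if (!chan) {
			/* No suitably aligned DMA channel: do a plain CPU copy. */
			memcpy(dst, src, len);
			return;
		}

		/* ... otherwise queue an asynchronous copy on @chan ... */
	}
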
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d8..73b2b65 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@
 			   PCI_DMA_TODEVICE, flags, 0);
 }
 
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 	u64 completion;
 
 	completion = *chan->completion;
@@ -569,7 +569,7 @@
 }
 
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete)
+			   dma_addr_t *phys_complete)
 {
 	*phys_complete = ioat_get_current_completion(chan);
 	if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@
 	return true;
 }
 
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct list_head *_desc, *n;
 	struct dma_async_tx_descriptor *tx;
 
-	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
-		 __func__, phys_complete);
+	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+		 __func__, (unsigned long long) phys_complete);
 	list_for_each_safe(_desc, n, &ioat->used_desc) {
 		struct ioat_desc_sw *desc;
 
@@ -652,7 +652,7 @@
 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	prefetch(chan->completion);
 
@@ -698,7 +698,7 @@
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 		spin_unlock_bh(&ioat->desc_lock);
 	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 
 		spin_lock_bh(&ioat->desc_lock);
 		/* if we haven't made progress and we have already
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bc..5e8fe01 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@
 struct ioat_chan_common {
 	struct dma_chan common;
 	void __iomem *reg_base;
-	unsigned long last_completion;
+	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
 	#define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@
 void __devexit ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete);
+			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110f..8689576 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@
 	spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@
 static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@
 static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -572,9 +572,9 @@
 	 */
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_chan *c = &chan->common;
-	const u16 curr_size = ioat2_ring_size(ioat);
+	const u32 curr_size = ioat2_ring_size(ioat);
 	const u16 active = ioat2_ring_active(ioat);
-	const u16 new_size = 1 << order;
+	const u32 new_size = 1 << order;
 	struct ioat_ring_ent **ring;
 	u16 i;
 
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b..be2a55b 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@
 	return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
 	return 1 << ioat->alloc_order;
 }
@@ -91,7 +91,7 @@
 	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
 	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c..f7f1dc6 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@
 	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
 }
 
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+		dma->copy_align = 6;
+
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2..79e3eba 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@
 	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
 	/* address conversion buffers (dma_map / page_address) */
 	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
-	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
-	dma_addr_t pq_dest[2];
+	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
 
 	int i;
 	struct dma_async_tx_descriptor *tx;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 65334c4..c81ef7e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,10 +22,10 @@
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
+#include <linux/fsl/mxs-dma.h>
 
 #include <asm/irq.h>
 #include <mach/mxs.h>
-#include <mach/dma.h>
 #include <mach/common.h>
 
 #include "dmaengine.h"
@@ -337,10 +337,32 @@
 	clk_disable_unprepare(mxs_dma->clk);
 }
 
+/*
+ * How to use the flags for ->device_prep_slave_sg():
+ *    [1] If there is only one DMA command in the DMA chain, the code should be:
+ *            ......
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK);
+ *            ......
+ *    [2] If there are two DMA commands in the DMA chain, the code should be:
+ *            ......
+ *            ->device_prep_slave_sg(0);
+ *            ......
+ *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *            ......
+ *    [3] If there are more than two DMA commands in the DMA chain, the code
+ *        should be:
+ *            ......
+ *            ->device_prep_slave_sg(0);                                // First
+ *            ......
+ *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ *            ......
+ *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ *            ......
+ */
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long append, void *context)
+		unsigned long flags, void *context)
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -348,6 +370,7 @@
 	struct scatterlist *sg;
 	int i, j;
 	u32 *pio;
+	bool append = flags & DMA_PREP_INTERRUPT;
 	int idx = append ? mxs_chan->desc_count : 0;
 
 	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
@@ -374,7 +397,6 @@
 		ccw->bits |= CCW_CHAIN;
 		ccw->bits &= ~CCW_IRQ;
 		ccw->bits &= ~CCW_DEC_SEM;
-		ccw->bits &= ~CCW_WAIT4END;
 	} else {
 		idx = 0;
 	}
@@ -389,7 +411,8 @@
 		ccw->bits = 0;
 		ccw->bits |= CCW_IRQ;
 		ccw->bits |= CCW_DEC_SEM;
-		ccw->bits |= CCW_WAIT4END;
+		if (flags & DMA_CTRL_ACK)
+			ccw->bits |= CCW_WAIT4END;
 		ccw->bits |= CCW_HALT_ON_TERM;
 		ccw->bits |= CCW_TERM_FLUSH;
 		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
@@ -420,7 +443,8 @@
 				ccw->bits &= ~CCW_CHAIN;
 				ccw->bits |= CCW_IRQ;
 				ccw->bits |= CCW_DEC_SEM;
-				ccw->bits |= CCW_WAIT4END;
+				if (flags & DMA_CTRL_ACK)
+					ccw->bits |= CCW_WAIT4END;
 			}
 		}
 	}
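
To tie the flag convention described in the comment above to concrete calls, a two-command chain would be prepared roughly as follows (channel setup and scatterlist initialization omitted; a sketch under the new flag scheme, not code taken from this patch):

	struct dma_async_tx_descriptor *desc;

	/* first command: opens the chain, neither IRQ nor WAIT4END yet */
	desc = dmaengine_prep_slave_sg(chan, sgl_cmd, 1, DMA_MEM_TO_DEV, 0);

	/* last command: DMA_PREP_INTERRUPT appends to the existing chain,
	 * DMA_CTRL_ACK requests CCW_WAIT4END on the final descriptor
	 */
	desc = dmaengine_prep_slave_sg(chan, sgl_data, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
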
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 16a6b48..ec78cce 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -585,7 +585,7 @@
 
 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
-	enum dma_transfer_direction dir, unsigned long flags)
+	enum dma_transfer_direction dir, unsigned long flags, void *context)
 {
 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	struct sa11x0_dma_desc *txd;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 36e1486..d0c372e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -754,9 +754,7 @@
 	if (c->x86_vendor != X86_VENDOR_AMD)
 		return 0;
 
-	if ((c->x86 < 0xf || c->x86 > 0x12) &&
-	    (c->x86 != 0x14 || c->x86_model > 0xf) &&
-	    (c->x86 != 0x15 || c->x86_model > 0xf))
+	if (c->x86 < 0xf || c->x86 > 0x15)
 		return 0;
 
 	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -797,7 +795,7 @@
 		break;
 
 	default:
-		printk(KERN_WARNING "Huh? What family is that: %d?!\n", c->x86);
+		printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
 		kfree(fam_ops);
 		return -EINVAL;
 	}
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 1d5cf06..e99d009 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -145,7 +145,11 @@
 	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
 
 	mci->mod_name = DRV_NAME;
+#ifdef __tilegx__
+	mci->ctl_name = "TILEGx_Memory_Controller";
+#else
 	mci->ctl_name = "TILEPro_Memory_Controller";
+#endif
 	mci->dev_name = dev_name(&pdev->dev);
 	mci->edac_check = tile_edac_check;
 
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 32de670..12f349b3 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -22,7 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/irqdomain.h>
@@ -37,7 +37,8 @@
 #define GPIO_PORT(x)		(((x) >> 3) & 0x3)
 #define GPIO_BIT(x)		((x) & 0x7)
 
-#define GPIO_REG(x)		(GPIO_BANK(x) * 0x80 + GPIO_PORT(x) * 4)
+#define GPIO_REG(x)		(GPIO_BANK(x) * tegra_gpio_bank_stride + \
+					GPIO_PORT(x) * 4)
 
 #define GPIO_CNF(x)		(GPIO_REG(x) + 0x00)
 #define GPIO_OE(x)		(GPIO_REG(x) + 0x10)
@@ -48,12 +49,12 @@
 #define GPIO_INT_LVL(x)		(GPIO_REG(x) + 0x60)
 #define GPIO_INT_CLR(x)		(GPIO_REG(x) + 0x70)
 
-#define GPIO_MSK_CNF(x)		(GPIO_REG(x) + 0x800)
-#define GPIO_MSK_OE(x)		(GPIO_REG(x) + 0x810)
-#define GPIO_MSK_OUT(x)		(GPIO_REG(x) + 0X820)
-#define GPIO_MSK_INT_STA(x)	(GPIO_REG(x) + 0x840)
-#define GPIO_MSK_INT_ENB(x)	(GPIO_REG(x) + 0x850)
-#define GPIO_MSK_INT_LVL(x)	(GPIO_REG(x) + 0x860)
+#define GPIO_MSK_CNF(x)		(GPIO_REG(x) + tegra_gpio_upper_offset + 0x00)
+#define GPIO_MSK_OE(x)		(GPIO_REG(x) + tegra_gpio_upper_offset + 0x10)
+#define GPIO_MSK_OUT(x)		(GPIO_REG(x) + tegra_gpio_upper_offset + 0X20)
+#define GPIO_MSK_INT_STA(x)	(GPIO_REG(x) + tegra_gpio_upper_offset + 0x40)
+#define GPIO_MSK_INT_ENB(x)	(GPIO_REG(x) + tegra_gpio_upper_offset + 0x50)
+#define GPIO_MSK_INT_LVL(x)	(GPIO_REG(x) + tegra_gpio_upper_offset + 0x60)
 
 #define GPIO_INT_LVL_MASK		0x010101
 #define GPIO_INT_LVL_EDGE_RISING	0x000101
@@ -78,6 +79,8 @@
 static struct irq_domain *irq_domain;
 static void __iomem *regs;
 static u32 tegra_gpio_bank_count;
+static u32 tegra_gpio_bank_stride;
+static u32 tegra_gpio_upper_offset;
 static struct tegra_gpio_bank *tegra_gpio_banks;
 
 static inline void tegra_gpio_writel(u32 val, u32 reg)
@@ -333,6 +336,26 @@
 #endif
 };
 
+struct tegra_gpio_soc_config {
+	u32 bank_stride;
+	u32 upper_offset;
+};
+
+static struct tegra_gpio_soc_config tegra20_gpio_config = {
+	.bank_stride = 0x80,
+	.upper_offset = 0x800,
+};
+
+static struct tegra_gpio_soc_config tegra30_gpio_config = {
+	.bank_stride = 0x100,
+	.upper_offset = 0x80,
+};
+
+static struct of_device_id tegra_gpio_of_match[] __devinitdata = {
+	{ .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config },
+	{ .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
+	{ },
+};
 
 /* This lock class tells lockdep that GPIO irqs are in a different
  * category than their parents, so it won't report false recursion.
@@ -341,6 +364,8 @@
 
 static int __devinit tegra_gpio_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
+	struct tegra_gpio_soc_config *config;
 	int irq_base;
 	struct resource *res;
 	struct tegra_gpio_bank *bank;
@@ -348,6 +373,15 @@
 	int i;
 	int j;
 
+	match = of_match_device(tegra_gpio_of_match, &pdev->dev);
+	if (match)
+		config = (struct tegra_gpio_soc_config *)match->data;
+	else
+		config = &tegra20_gpio_config;
+
+	tegra_gpio_bank_stride = config->bank_stride;
+	tegra_gpio_upper_offset = config->upper_offset;
+
 	for (;;) {
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, tegra_gpio_bank_count);
 		if (!res)
@@ -402,7 +436,7 @@
 		return -ENODEV;
 	}
 
-	for (i = 0; i < 7; i++) {
+	for (i = 0; i < tegra_gpio_bank_count; i++) {
 		for (j = 0; j < 4; j++) {
 			int gpio = tegra_gpio_compose(i, j, 0);
 			tegra_gpio_writel(0x00, GPIO_INT_ENB(gpio));
@@ -441,11 +475,6 @@
 	return 0;
 }
 
-static struct of_device_id tegra_gpio_of_match[] __devinitdata = {
-	{ .compatible = "nvidia,tegra20-gpio", },
-	{ },
-};
-
 static struct platform_driver tegra_gpio_driver = {
 	.driver		= {
 		.name	= "tegra-gpio",
@@ -485,7 +514,7 @@
 	int i;
 	int j;
 
-	for (i = 0; i < 7; i++) {
+	for (i = 0; i < tegra_gpio_bank_count; i++) {
 		for (j = 0; j < 4; j++) {
 			int gpio = tegra_gpio_compose(i, j, 0);
 			seq_printf(s,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cc11488..e354bc0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
 	select I2C
 	select I2C_ALGOBIT
+	select DMA_SHARED_BUFFER
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a858532..c20da5b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o
+		drm_trace_points.o drm_global.o drm_prime.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0b65fbc..6116e3b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -136,6 +136,10 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7740dd2..a0d6e89 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -559,9 +559,13 @@
 		return -EINVAL;
 
 	/* Need to resize the fb object !!! */
-	if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+	if (var->bits_per_pixel > fb->bits_per_pixel ||
+	    var->xres > fb->width || var->yres > fb->height ||
+	    var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
 		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
-			  "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+			  "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+			  var->xres, var->yres, var->bits_per_pixel,
+			  var->xres_virtual, var->yres_virtual,
 			  fb->width, fb->height, fb->bits_per_pixel);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7348a3d..cdfbf27 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -271,6 +271,9 @@
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);
 
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_init_file_private(&priv->prime);
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -571,6 +574,10 @@
 
 	if (dev->driver->postclose)
 		dev->driver->postclose(dev, file_priv);
+
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_destroy_file_private(&file_priv->prime);
+
 	kfree(file_priv);
 
 	/* ========================================================
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0ef358e..83114b5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -35,6 +35,7 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 #include "drmP.h"
 
 /** @file drm_gem.c
@@ -232,6 +233,10 @@
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
+	if (obj->import_attach)
+		drm_prime_remove_imported_buf_handle(&filp->prime,
+				obj->import_attach->dmabuf);
+
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, filp);
 	drm_gem_object_handle_unreference_unlocked(obj);
@@ -527,6 +532,10 @@
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
+	if (obj->import_attach)
+		drm_prime_remove_imported_buf_handle(&file_priv->prime,
+				obj->import_attach->dmabuf);
+
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
 
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
new file mode 100644
index 0000000..1bdf2b5
--- /dev/null
+++ b/drivers/gpu/drm/drm_prime.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright © 2012 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include "drmP.h"
+
+/*
+ * DMA-BUF/GEM Object references and lifetime overview:
+ *
+ * On the export the dma_buf holds a reference to the exporting GEM
+ * object. It takes this reference in handle_to_fd_ioctl, when it
+ * first calls .prime_export and stores the exporting GEM object in
+ * the dma_buf priv. This reference is released when the dma_buf
+ * object goes away in the driver .release function.
+ *
+ * On the import the importing GEM object holds a reference to the
+ * dma_buf (which in turn holds a ref to the exporting GEM object).
+ * It takes that reference in the fd_to_handle ioctl.
+ * It calls dma_buf_get, creates an attachment to it and stores the
+ * attachment in the GEM object. When the imported object is
+ * destroyed, the attachment is removed and the reference to the
+ * dma_buf is dropped.
+ *
+ * Thus the chain of references always flows in one direction
+ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ *
+ * Self-importing: if userspace is using PRIME as a replacement for flink
+ * then it will get a fd->handle request for a GEM object that it created.
+ * Drivers should detect this situation and return the GEM object
+ * from the dma-buf private.
+ */
+
+struct drm_prime_member {
+	struct list_head entry;
+	struct dma_buf *dma_buf;
+	uint32_t handle;
+};
+
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+		int *prime_fd)
+{
+	struct drm_gem_object *obj;
+	void *buf;
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
+	mutex_lock(&file_priv->prime.lock);
+	/* re-export the original imported object */
+	if (obj->import_attach) {
+		get_dma_buf(obj->import_attach->dmabuf);
+		*prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
+		drm_gem_object_unreference_unlocked(obj);
+		mutex_unlock(&file_priv->prime.lock);
+		return 0;
+	}
+
+	if (obj->export_dma_buf) {
+		get_dma_buf(obj->export_dma_buf);
+		*prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
+		drm_gem_object_unreference_unlocked(obj);
+	} else {
+		buf = dev->driver->gem_prime_export(dev, obj, flags);
+		if (IS_ERR(buf)) {
+			/* normally the created dma-buf takes ownership of the ref,
+			 * but if that fails then drop the ref
+			 */
+			drm_gem_object_unreference_unlocked(obj);
+			mutex_unlock(&file_priv->prime.lock);
+			return PTR_ERR(buf);
+		}
+		obj->export_dma_buf = buf;
+		*prime_fd = dma_buf_fd(buf, flags);
+	}
+	mutex_unlock(&file_priv->prime.lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+{
+	struct dma_buf *dma_buf;
+	struct drm_gem_object *obj;
+	int ret;
+
+	dma_buf = dma_buf_get(prime_fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	mutex_lock(&file_priv->prime.lock);
+
+	ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+			dma_buf, handle);
+	if (!ret) {
+		ret = 0;
+		goto out_put;
+	}
+
+	/* never seen this one, need to import */
+	obj = dev->driver->gem_prime_import(dev, dma_buf);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto out_put;
+	}
+
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+	drm_gem_object_unreference_unlocked(obj);
+	if (ret)
+		goto out_put;
+
+	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+			dma_buf, *handle);
+	if (ret)
+		goto fail;
+
+	mutex_unlock(&file_priv->prime.lock);
+	return 0;
+
+fail:
+	/* hmm, if driver attached, we are relying on the free-object path
+	 * to detach.. which seems ok..
+	 */
+	drm_gem_object_handle_unreference_unlocked(obj);
+out_put:
+	dma_buf_put(dma_buf);
+	mutex_unlock(&file_priv->prime.lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_prime_handle *args = data;
+	uint32_t flags;
+
+	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+		return -EINVAL;
+
+	if (!dev->driver->prime_handle_to_fd)
+		return -ENOSYS;
+
+	/* check flags are valid */
+	if (args->flags & ~DRM_CLOEXEC)
+		return -EINVAL;
+
+	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+	flags = args->flags & DRM_CLOEXEC;
+
+	return dev->driver->prime_handle_to_fd(dev, file_priv,
+			args->handle, flags, &args->fd);
+}
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_prime_handle *args = data;
+
+	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+		return -EINVAL;
+
+	if (!dev->driver->prime_fd_to_handle)
+		return -ENOSYS;
+
+	return dev->driver->prime_fd_to_handle(dev, file_priv,
+			args->fd, &args->handle);
+}
+
+/*
+ * drm_prime_pages_to_sg
+ *
+ * This helper creates an sg table object from a set of pages; the
+ * driver is responsible for mapping the pages into the importer's
+ * address space.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+{
+	struct sg_table *sg = NULL;
+	struct scatterlist *iter;
+	int i;
+	int ret;
+
+	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sg)
+		goto out;
+
+	ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	for_each_sg(sg->sgl, iter, nr_pages, i)
+		sg_set_page(iter, pages[i], PAGE_SIZE, 0);
+
+	return sg;
+out:
+	kfree(sg);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
+
+/* helper function to cleanup a GEM/prime object */
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+	struct dma_buf_attachment *attach;
+	struct dma_buf *dma_buf;
+	attach = obj->import_attach;
+	if (sg)
+		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+	dma_buf = attach->dmabuf;
+	dma_buf_detach(attach->dmabuf, attach);
+	/* remove the reference */
+	dma_buf_put(dma_buf);
+}
+EXPORT_SYMBOL(drm_prime_gem_destroy);
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+	INIT_LIST_HEAD(&prime_fpriv->head);
+	mutex_init(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_init_file_private);
+
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+	struct drm_prime_member *member, *safe;
+	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+		list_del(&member->entry);
+		kfree(member);
+	}
+}
+EXPORT_SYMBOL(drm_prime_destroy_file_private);
+
+int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+{
+	struct drm_prime_member *member;
+
+	member = kmalloc(sizeof(*member), GFP_KERNEL);
+	if (!member)
+		return -ENOMEM;
+
+	member->dma_buf = dma_buf;
+	member->handle = handle;
+	list_add(&member->entry, &prime_fpriv->head);
+	return 0;
+}
+EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
+
+int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+{
+	struct drm_prime_member *member;
+
+	list_for_each_entry(member, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			*handle = member->handle;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+
+void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+{
+	struct drm_prime_member *member, *safe;
+
+	mutex_lock(&prime_fpriv->lock);
+	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			list_del(&member->entry);
+			kfree(member);
+		}
+	}
+	mutex_unlock(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
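
For reference, userspace drives the two new ioctls through struct drm_prime_handle, which is added to the uapi header in the same series but not shown in this section; a rough usage sketch with error handling trimmed:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm.h>		/* struct drm_prime_handle, DRM_CLOEXEC */

	/* Export a GEM handle of one device as a dma-buf fd ... */
	static int prime_export(int drm_fd, uint32_t handle)
	{
		struct drm_prime_handle args = {
			.handle = handle,
			.flags	= DRM_CLOEXEC,	/* the only flag the ioctl accepts */
		};

		if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
			return -1;
		return args.fd;
	}

	/* ... and turn that fd back into a GEM handle on another device. */
	static int prime_import(int drm_fd, int dmabuf_fd, uint32_t *handle)
	{
		struct drm_prime_handle args = { .fd = dmabuf_fd };

		if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
			return -1;
		*handle = args.handle;
		return 0;
	}
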
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 4a3a5f7..de8d209 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -34,14 +34,14 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr, end_addr;
+	dma_addr_t start_addr;
 	unsigned int npages, page_size, i = 0;
 	struct scatterlist *sgl;
 	int ret = 0;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (flags & EXYNOS_BO_NONCONTIG) {
+	if (IS_NONCONTIG_BUFFER(flags)) {
 		DRM_DEBUG_KMS("not support allocation type.\n");
 		return -EINVAL;
 	}
@@ -52,13 +52,13 @@
 	}
 
 	if (buf->size >= SZ_1M) {
-		npages = (buf->size >> SECTION_SHIFT) + 1;
+		npages = buf->size >> SECTION_SHIFT;
 		page_size = SECTION_SIZE;
 	} else if (buf->size >= SZ_64K) {
-		npages = (buf->size >> 16) + 1;
+		npages = buf->size >> 16;
 		page_size = SZ_64K;
 	} else {
-		npages = (buf->size >> PAGE_SHIFT) + 1;
+		npages = buf->size >> PAGE_SHIFT;
 		page_size = PAGE_SIZE;
 	}
 
@@ -76,26 +76,13 @@
 		return -ENOMEM;
 	}
 
-		buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-				&buf->dma_addr, GFP_KERNEL);
-		if (!buf->kvaddr) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			ret = -ENOMEM;
-			goto err1;
-		}
-
-		start_addr = buf->dma_addr;
-		end_addr = buf->dma_addr + buf->size;
-
-		buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate pages.\n");
-			ret = -ENOMEM;
-			goto err2;
-		}
-
-	start_addr = buf->dma_addr;
-	end_addr = buf->dma_addr + buf->size;
+	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL);
+	if (!buf->kvaddr) {
+		DRM_ERROR("failed to allocate buffer.\n");
+		ret = -ENOMEM;
+		goto err1;
+	}
 
 	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
 	if (!buf->pages) {
@@ -105,23 +92,17 @@
 	}
 
 	sgl = buf->sgt->sgl;
+	start_addr = buf->dma_addr;
 
 	while (i < npages) {
 		buf->pages[i] = phys_to_page(start_addr);
 		sg_set_page(sgl, buf->pages[i], page_size, 0);
 		sg_dma_address(sgl) = start_addr;
 		start_addr += page_size;
-		if (end_addr - start_addr < page_size)
-			break;
 		sgl = sg_next(sgl);
 		i++;
 	}
 
-	buf->pages[i] = phys_to_page(start_addr);
-
-	sgl = sg_next(sgl);
-	sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0);
-
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->kvaddr,
 			(unsigned long)buf->dma_addr,
@@ -150,7 +131,7 @@
 	 * non-continuous memory would be released by exynos
 	 * gem framework.
 	 */
-	if (flags & EXYNOS_BO_NONCONTIG) {
+	if (IS_NONCONTIG_BUFFER(flags)) {
 		DRM_DEBUG_KMS("not support allocation type.\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 411832e..eaf630d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -54,16 +54,18 @@
 		 *
 		 * P.S. note that this driver is considered for modularization.
 		 */
-		ret = subdrv->probe(dev, subdrv->manager.dev);
+		ret = subdrv->probe(dev, subdrv->dev);
 		if (ret)
 			return ret;
 	}
 
-	if (subdrv->is_local)
+	if (!subdrv->manager)
 		return 0;
 
+	subdrv->manager->dev = subdrv->dev;
+
 	/* create and initialize a encoder for this sub driver. */
-	encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
+	encoder = exynos_drm_encoder_create(dev, subdrv->manager,
 			(1 << MAX_CRTC) - 1);
 	if (!encoder) {
 		DRM_ERROR("failed to create encoder\n");
@@ -186,7 +188,7 @@
 
 	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
 		if (subdrv->open) {
-			ret = subdrv->open(dev, subdrv->manager.dev, file);
+			ret = subdrv->open(dev, subdrv->dev, file);
 			if (ret)
 				goto err;
 		}
@@ -197,7 +199,7 @@
 err:
 	list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
 		if (subdrv->close)
-			subdrv->close(dev, subdrv->manager.dev, file);
+			subdrv->close(dev, subdrv->dev, file);
 	}
 	return ret;
 }
@@ -209,7 +211,7 @@
 
 	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
 		if (subdrv->close)
-			subdrv->close(dev, subdrv->manager.dev, file);
+			subdrv->close(dev, subdrv->dev, file);
 	}
 }
 EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index fbd0a23..1d81417 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -225,24 +225,25 @@
  * Exynos drm sub driver structure.
  *
  * @list: sub driver has its own list object to register to exynos drm driver.
+ * @dev: pointer to device object for subdrv device driver.
  * @drm_dev: pointer to drm_device and this pointer would be set
  *	when sub driver calls exynos_drm_subdrv_register().
- * @is_local: appear encoder and connector disrelated device.
+ * @manager: subdrv has its own manager to control its hardware
+ *	appropriately; the hardware is accessed through this manager.
  * @probe: this callback would be called by exynos drm driver after
  *	subdrv is registered to it.
  * @remove: this callback is used to release resources created
  *	by probe callback.
  * @open: this would be called with drm device file open.
  * @close: this would be called with drm device file close.
- * @manager: subdrv has its own manager to control a hardware appropriately
- *	and we can access a hardware drawing on this manager.
  * @encoder: encoder object owned by this sub driver.
  * @connector: connector object owned by this sub driver.
  */
 struct exynos_drm_subdrv {
 	struct list_head list;
+	struct device *dev;
 	struct drm_device *drm_dev;
-	bool is_local;
+	struct exynos_drm_manager *manager;
 
 	int (*probe)(struct drm_device *drm_dev, struct device *dev);
 	void (*remove)(struct drm_device *dev);
@@ -251,7 +252,6 @@
 	void (*close)(struct drm_device *drm_dev, struct device *dev,
 			struct drm_file *file);
 
-	struct exynos_drm_manager manager;
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 };
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index ecb6db2..29fdbfe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -172,7 +172,7 @@
 static void fimd_apply(struct device *subdrv_dev)
 {
 	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
-	struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
 	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
 	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
 	struct fimd_win_data *win_data;
@@ -577,6 +577,13 @@
 	.disable = fimd_win_disable,
 };
 
+static struct exynos_drm_manager fimd_manager = {
+	.pipe		= -1,
+	.ops		= &fimd_manager_ops,
+	.overlay_ops	= &fimd_overlay_ops,
+	.display_ops	= &fimd_display_ops,
+};
+
 static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 {
 	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
@@ -628,7 +635,7 @@
 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
 	struct drm_device *drm_dev = subdrv->drm_dev;
-	struct exynos_drm_manager *manager = &subdrv->manager;
+	struct exynos_drm_manager *manager = subdrv->manager;
 	u32 val;
 
 	val = readl(ctx->regs + VIDINTCON1);
@@ -744,7 +751,7 @@
 static int fimd_power_on(struct fimd_context *ctx, bool enable)
 {
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct device *dev = subdrv->manager.dev;
+	struct device *dev = subdrv->dev;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -867,13 +874,10 @@
 
 	subdrv = &ctx->subdrv;
 
+	subdrv->dev = dev;
+	subdrv->manager = &fimd_manager;
 	subdrv->probe = fimd_subdrv_probe;
 	subdrv->remove = fimd_subdrv_remove;
-	subdrv->manager.pipe = -1;
-	subdrv->manager.ops = &fimd_manager_ops;
-	subdrv->manager.overlay_ops = &fimd_overlay_ops;
-	subdrv->manager.display_ops = &fimd_display_ops;
-	subdrv->manager.dev = dev;
 
 	mutex_init(&ctx->lock);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fa1aa94..26d5197 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,9 +56,28 @@
 	return out_msg;
 }
 
-static unsigned int mask_gem_flags(unsigned int flags)
+static int check_gem_flags(unsigned int flags)
 {
-	return flags &= EXYNOS_BO_NONCONTIG;
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_ERROR("invalid flags.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
+{
+	if (!IS_NONCONTIG_BUFFER(flags)) {
+		if (size >= SZ_1M)
+			return roundup(size, SECTION_SIZE);
+		else if (size >= SZ_64K)
+			return roundup(size, SZ_64K);
+		else
+			goto out;
+	}
+out:
+	return roundup(size, PAGE_SIZE);
 }
 
 static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
@@ -319,10 +338,17 @@
 	struct exynos_drm_gem_buf *buf;
 	int ret;
 
-	size = roundup(size, PAGE_SIZE);
-	DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+	if (!size) {
+		DRM_ERROR("invalid size.\n");
+		return ERR_PTR(-EINVAL);
+	}
 
-	flags = mask_gem_flags(flags);
+	size = roundup_gem_size(size, flags);
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ret = check_gem_flags(flags);
+	if (ret)
+		return ERR_PTR(ret);
 
 	buf = exynos_drm_init_buf(dev, size);
 	if (!buf)
@@ -331,7 +357,7 @@
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err;
+		goto err_fini_buf;
 	}
 
 	exynos_gem_obj->buffer = buf;
@@ -347,18 +373,19 @@
 		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
 		if (ret < 0) {
 			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err;
+			goto err_fini_buf;
 		}
 	} else {
 		ret = exynos_drm_alloc_buf(dev, buf, flags);
 		if (ret < 0) {
 			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err;
+			goto err_fini_buf;
 		}
 	}
 
 	return exynos_gem_obj;
-err:
+
+err_fini_buf:
 	exynos_drm_fini_buf(dev, buf);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e40fbad..4ed8420 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -29,6 +29,8 @@
 #define to_exynos_gem_obj(x)	container_of(x,\
 			struct exynos_drm_gem_obj, base)
 
+#define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)
+
 /*
  * exynos drm gem buffer structure.
  *
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 14eb26b..3424463 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -30,9 +30,8 @@
 					struct drm_hdmi_context, subdrv);
 
 /* these callback points shoud be set by specific drivers. */
-static struct exynos_hdmi_display_ops *hdmi_display_ops;
-static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
-static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
+static struct exynos_hdmi_ops *hdmi_ops;
+static struct exynos_mixer_ops *mixer_ops;
 
 struct drm_hdmi_context {
 	struct exynos_drm_subdrv	subdrv;
@@ -40,31 +39,20 @@
 	struct exynos_drm_hdmi_context	*mixer_ctx;
 };
 
-void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
-					*display_ops)
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (display_ops)
-		hdmi_display_ops = display_ops;
+	if (ops)
+		hdmi_ops = ops;
 }
 
-void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
-					*manager_ops)
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (manager_ops)
-		hdmi_manager_ops = manager_ops;
-}
-
-void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
-					*overlay_ops)
-{
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
-	if (overlay_ops)
-		hdmi_overlay_ops = overlay_ops;
+	if (ops)
+		mixer_ops = ops;
 }
 
 static bool drm_hdmi_is_connected(struct device *dev)
@@ -73,8 +61,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->is_connected)
-		return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+	if (hdmi_ops && hdmi_ops->is_connected)
+		return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
 
 	return false;
 }
@@ -86,9 +74,9 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->get_edid)
-		return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
-				connector, edid, len);
+	if (hdmi_ops && hdmi_ops->get_edid)
+		return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
+					  len);
 
 	return 0;
 }
@@ -99,9 +87,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->check_timing)
-		return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
-				timing);
+	if (hdmi_ops && hdmi_ops->check_timing)
+		return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
 
 	return 0;
 }
@@ -112,8 +99,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->power_on)
-		return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+	if (hdmi_ops && hdmi_ops->power_on)
+		return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
 
 	return 0;
 }
@@ -130,13 +117,13 @@
 {
 	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct exynos_drm_manager *manager = &subdrv->manager;
+	struct exynos_drm_manager *manager = subdrv->manager;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
-		return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
-							manager->pipe);
+	if (mixer_ops && mixer_ops->enable_vblank)
+		return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
+						manager->pipe);
 
 	return 0;
 }
@@ -147,8 +134,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
-		return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+	if (mixer_ops && mixer_ops->disable_vblank)
+		return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }
 
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
@@ -160,9 +147,9 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->mode_fixup)
-		hdmi_manager_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector,
-						mode, adjusted_mode);
+	if (hdmi_ops && hdmi_ops->mode_fixup)
+		hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
+				     adjusted_mode);
 }
 
 static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
@@ -171,8 +158,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
-		hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+	if (hdmi_ops && hdmi_ops->mode_set)
+		hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
 }
 
 static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
@@ -182,9 +169,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->get_max_resol)
-		hdmi_manager_ops->get_max_resol(ctx->hdmi_ctx->ctx, width,
-							height);
+	if (hdmi_ops && hdmi_ops->get_max_resol)
+		hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
 }
 
 static void drm_hdmi_commit(struct device *subdrv_dev)
@@ -193,8 +179,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->commit)
-		hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+	if (hdmi_ops && hdmi_ops->commit)
+		hdmi_ops->commit(ctx->hdmi_ctx->ctx);
 }
 
 static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
@@ -209,8 +195,8 @@
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		if (hdmi_manager_ops && hdmi_manager_ops->disable)
-			hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+		if (hdmi_ops && hdmi_ops->disable)
+			hdmi_ops->disable(ctx->hdmi_ctx->ctx);
 		break;
 	default:
 		DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
@@ -235,8 +221,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
-		hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+	if (mixer_ops && mixer_ops->win_mode_set)
+		mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
 }
 
 static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
@@ -245,8 +231,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
-		hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+	if (mixer_ops && mixer_ops->win_commit)
+		mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
 }
 
 static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
@@ -255,8 +241,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
-		hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+	if (mixer_ops && mixer_ops->win_disable)
+		mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
 }
 
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
@@ -265,6 +251,12 @@
 	.disable = drm_mixer_disable,
 };
 
+static struct exynos_drm_manager hdmi_manager = {
+	.pipe		= -1,
+	.ops		= &drm_hdmi_manager_ops,
+	.overlay_ops	= &drm_hdmi_overlay_ops,
+	.display_ops	= &drm_hdmi_display_ops,
+};
 
 static int hdmi_subdrv_probe(struct drm_device *drm_dev,
 		struct device *dev)
@@ -332,12 +324,9 @@
 
 	subdrv = &ctx->subdrv;
 
+	subdrv->dev = dev;
+	subdrv->manager = &hdmi_manager;
 	subdrv->probe = hdmi_subdrv_probe;
-	subdrv->manager.pipe = -1;
-	subdrv->manager.ops = &drm_hdmi_manager_ops;
-	subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
-	subdrv->manager.display_ops = &drm_hdmi_display_ops;
-	subdrv->manager.dev = dev;
 
 	platform_set_drvdata(pdev, subdrv);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 44497cf..f3ae192 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -38,15 +38,15 @@
 	void			*ctx;
 };
 
-struct exynos_hdmi_display_ops {
+struct exynos_hdmi_ops {
+	/* display */
 	bool (*is_connected)(void *ctx);
 	int (*get_edid)(void *ctx, struct drm_connector *connector,
 			u8 *edid, int len);
 	int (*check_timing)(void *ctx, void *timing);
 	int (*power_on)(void *ctx, int mode);
-};
 
-struct exynos_hdmi_manager_ops {
+	/* manager */
 	void (*mode_fixup)(void *ctx, struct drm_connector *connector,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode);
@@ -57,22 +57,17 @@
 	void (*disable)(void *ctx);
 };
 
-struct exynos_hdmi_overlay_ops {
+struct exynos_mixer_ops {
+	/* manager */
 	int (*enable_vblank)(void *ctx, int pipe);
 	void (*disable_vblank)(void *ctx);
+
+	/* overlay */
 	void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
 	void (*win_commit)(void *ctx, int zpos);
 	void (*win_disable)(void *ctx, int zpos);
 };
 
-extern struct platform_driver hdmi_driver;
-extern struct platform_driver mixer_driver;
-
-void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
-					*display_ops);
-void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
-					*manager_ops);
-void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
-					*overlay_ops);
-
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c277a3a..f92fe4c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -24,6 +24,10 @@
 
 static const uint32_t formats[] = {
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV12M,
+	DRM_FORMAT_NV12MT,
 };
 
 static int
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 8e1339f..7b9c153 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -199,7 +199,7 @@
 static void vidi_apply(struct device *subdrv_dev)
 {
 	struct vidi_context *ctx = get_vidi_context(subdrv_dev);
-	struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
 	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
 	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
 	struct vidi_win_data *win_data;
@@ -374,6 +374,13 @@
 	.disable = vidi_win_disable,
 };
 
+static struct exynos_drm_manager vidi_manager = {
+	.pipe		= -1,
+	.ops		= &vidi_manager_ops,
+	.overlay_ops	= &vidi_overlay_ops,
+	.display_ops	= &vidi_display_ops,
+};
+
 static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 {
 	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
@@ -425,7 +432,7 @@
 	struct vidi_context *ctx = container_of(work, struct vidi_context,
 					work);
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct exynos_drm_manager *manager = &subdrv->manager;
+	struct exynos_drm_manager *manager = subdrv->manager;
 
 	if (manager->pipe < 0)
 		return;
@@ -471,7 +478,7 @@
 static int vidi_power_on(struct vidi_context *ctx, bool enable)
 {
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct device *dev = subdrv->manager.dev;
+	struct device *dev = subdrv->dev;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -611,13 +618,10 @@
 	ctx->raw_edid = (struct edid *)fake_edid_info;
 
 	subdrv = &ctx->subdrv;
+	subdrv->dev = dev;
+	subdrv->manager = &vidi_manager;
 	subdrv->probe = vidi_subdrv_probe;
 	subdrv->remove = vidi_subdrv_remove;
-	subdrv->manager.pipe = -1;
-	subdrv->manager.ops = &vidi_manager_ops;
-	subdrv->manager.overlay_ops = &vidi_overlay_ops;
-	subdrv->manager.display_ops = &vidi_display_ops;
-	subdrv->manager.dev = dev;
 
 	mutex_init(&ctx->lock);
 
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 575a8cb..b003538 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -40,7 +40,6 @@
 
 #include "exynos_hdmi.h"
 
-#define HDMI_OVERLAY_NUMBER	3
 #define MAX_WIDTH		1920
 #define MAX_HEIGHT		1080
 #define get_hdmi_context(dev)	platform_get_drvdata(to_platform_device(dev))
@@ -1194,7 +1193,7 @@
 
 static bool hdmi_is_connected(void *ctx)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
 
 	if (val)
@@ -1207,7 +1206,7 @@
 				u8 *edid, int len)
 {
 	struct edid *raw_edid;
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
@@ -1275,7 +1274,7 @@
 
 static int hdmi_check_timing(void *ctx, void *timing)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	struct fb_videomode *check_timing = timing;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1312,13 +1311,6 @@
 	return 0;
 }
 
-static struct exynos_hdmi_display_ops display_ops = {
-	.is_connected	= hdmi_is_connected,
-	.get_edid	= hdmi_get_edid,
-	.check_timing	= hdmi_check_timing,
-	.power_on	= hdmi_display_power_on,
-};
-
 static void hdmi_set_acr(u32 freq, u8 *acr)
 {
 	u32 n, cts;
@@ -1914,7 +1906,7 @@
 				struct drm_display_mode *adjusted_mode)
 {
 	struct drm_display_mode *m;
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	int index;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1951,7 +1943,7 @@
 
 static void hdmi_mode_set(void *ctx, void *mode)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	int conf_idx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1974,7 +1966,7 @@
 
 static void hdmi_commit(void *ctx)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
@@ -1985,7 +1977,7 @@
 
 static void hdmi_disable(void *ctx)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
@@ -1996,7 +1988,14 @@
 	}
 }
 
-static struct exynos_hdmi_manager_ops manager_ops = {
+static struct exynos_hdmi_ops hdmi_ops = {
+	/* display */
+	.is_connected	= hdmi_is_connected,
+	.get_edid	= hdmi_get_edid,
+	.check_timing	= hdmi_check_timing,
+	.power_on	= hdmi_display_power_on,
+
+	/* manager */
 	.mode_fixup	= hdmi_mode_fixup,
 	.mode_set	= hdmi_mode_set,
 	.get_max_resol	= hdmi_get_max_resol,
@@ -2020,7 +2019,7 @@
 static irqreturn_t hdmi_irq_handler(int irq, void *arg)
 {
 	struct exynos_drm_hdmi_context *ctx = arg;
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+	struct hdmi_context *hdata = ctx->ctx;
 	u32 intc_flag;
 
 	intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
@@ -2173,7 +2172,7 @@
 
 	DRM_DEBUG_KMS("%s\n", __func__);
 
-	hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
+	hdmi_resource_poweroff(ctx->ctx);
 
 	return 0;
 }
@@ -2184,7 +2183,7 @@
 
 	DRM_DEBUG_KMS("%s\n", __func__);
 
-	hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
+	hdmi_resource_poweron(ctx->ctx);
 
 	return 0;
 }
@@ -2322,8 +2321,7 @@
 	hdata->irq = res->start;
 
 	/* register specific callbacks to common hdmi. */
-	exynos_drm_display_ops_register(&display_ops);
-	exynos_drm_manager_ops_register(&manager_ops);
+	exynos_hdmi_ops_register(&hdmi_ops);
 
 	hdmi_resource_poweron(hdata);
 
@@ -2351,7 +2349,7 @@
 static int __devexit hdmi_remove(struct platform_device *pdev)
 {
 	struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+	struct hdmi_context *hdata = ctx->ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4d5f41e..e15438c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,7 +37,8 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_hdmi.h"
 
-#define HDMI_OVERLAY_NUMBER	3
+#define MIXER_WIN_NR		3
+#define MIXER_DEFAULT_WIN	0
 
 #define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))
 
@@ -75,16 +76,12 @@
 };
 
 struct mixer_context {
-	struct fb_videomode	*default_timing;
-	unsigned int		default_win;
-	unsigned int		default_bpp;
 	unsigned int		irq;
 	int			pipe;
 	bool			interlace;
-	bool			vp_enabled;
 
 	struct mixer_resources	mixer_res;
-	struct hdmi_win_data	win_data[HDMI_OVERLAY_NUMBER];
+	struct hdmi_win_data	win_data[MIXER_WIN_NR];
 };
 
 static const u8 filter_y_horiz_tap8[] = {
@@ -643,9 +640,9 @@
 
 	win = overlay->zpos;
 	if (win == DEFAULT_ZPOS)
-		win = mixer_ctx->default_win;
+		win = MIXER_DEFAULT_WIN;
 
-	if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+	if (win < 0 || win > MIXER_WIN_NR) {
 		DRM_ERROR("overlay plane[%d] is wrong\n", win);
 		return;
 	}
@@ -683,9 +680,9 @@
 	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
 	if (win == DEFAULT_ZPOS)
-		win = mixer_ctx->default_win;
+		win = MIXER_DEFAULT_WIN;
 
-	if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+	if (win < 0 || win > MIXER_WIN_NR) {
 		DRM_ERROR("overlay plane[%d] is wrong\n", win);
 		return;
 	}
@@ -706,9 +703,9 @@
 	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
 	if (win == DEFAULT_ZPOS)
-		win = mixer_ctx->default_win;
+		win = MIXER_DEFAULT_WIN;
 
-	if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+	if (win < 0 || win > MIXER_WIN_NR) {
 		DRM_ERROR("overlay plane[%d] is wrong\n", win);
 		return;
 	}
@@ -722,9 +719,12 @@
 	spin_unlock_irqrestore(&res->reg_slock, flags);
 }
 
-static struct exynos_hdmi_overlay_ops overlay_ops = {
+static struct exynos_mixer_ops mixer_ops = {
+	/* manager */
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
+
+	/* overlay */
 	.win_mode_set		= mixer_win_mode_set,
 	.win_commit		= mixer_win_commit,
 	.win_disable		= mixer_win_disable,
@@ -771,8 +771,7 @@
 static irqreturn_t mixer_irq_handler(int irq, void *arg)
 {
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
-	struct mixer_context *ctx =
-			(struct mixer_context *)drm_hdmi_ctx->ctx;
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
 	struct mixer_resources *res = &ctx->mixer_res;
 	u32 val, val_base;
 
@@ -902,7 +901,7 @@
 
 	DRM_DEBUG_KMS("resume - start\n");
 
-	mixer_resource_poweron((struct mixer_context *)ctx->ctx);
+	mixer_resource_poweron(ctx->ctx);
 
 	return 0;
 }
@@ -913,7 +912,7 @@
 
 	DRM_DEBUG_KMS("suspend - start\n");
 
-	mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
+	mixer_resource_poweroff(ctx->ctx);
 
 	return 0;
 }
@@ -926,8 +925,7 @@
 static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
 				 struct platform_device *pdev)
 {
-	struct mixer_context *mixer_ctx =
-			(struct mixer_context *)ctx->ctx;
+	struct mixer_context *mixer_ctx = ctx->ctx;
 	struct device *dev = &pdev->dev;
 	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
 	struct resource *res;
@@ -1076,7 +1074,7 @@
 		goto fail;
 
 	/* register specific callback point to common hdmi. */
-	exynos_drm_overlay_ops_register(&overlay_ops);
+	exynos_mixer_ops_register(&mixer_ops);
 
 	mixer_resource_poweron(ctx);
 
@@ -1093,7 +1091,7 @@
 	struct device *dev = &pdev->dev;
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx =
 					platform_get_drvdata(pdev);
-	struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
 
 	dev_info(dev, "remove successful\n");
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fdb7cce..b505b70 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1502,14 +1502,6 @@
 	return 0;
 }
 
-static int
-i915_debugfs_common_open(struct inode *inode,
-			 struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t
 i915_wedged_read(struct file *filp,
 		 char __user *ubuf,
@@ -1560,7 +1552,7 @@
 
 static const struct file_operations i915_wedged_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_debugfs_common_open,
+	.open = simple_open,
 	.read = i915_wedged_read,
 	.write = i915_wedged_write,
 	.llseek = default_llseek,
@@ -1622,7 +1614,7 @@
 
 static const struct file_operations i915_max_freq_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_debugfs_common_open,
+	.open = simple_open,
 	.read = i915_max_freq_read,
 	.write = i915_max_freq_write,
 	.llseek = default_llseek,
@@ -1693,7 +1685,7 @@
 
 static const struct file_operations i915_cache_sharing_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_debugfs_common_open,
+	.open = simple_open,
 	.read = i915_cache_sharing_read,
 	.write = i915_cache_sharing_write,
 	.llseek = default_llseek,
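/*
 * For reference (a sketch assuming the fs/libfs.c helper; not part of this
 * patch): simple_open() does essentially what the removed
 * i915_debugfs_common_open() did, so the three fops conversions above are
 * behaviour-preserving:
 *
 *	int simple_open(struct inode *inode, struct file *file)
 *	{
 *		if (inode->i_private)
 *			file->private_data = inode->i_private;
 *		return 0;
 *	}
 */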
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9341eb8..785f67f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,21 @@
 	return can_switch;
 }
 
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
 static int i915_load_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1197,7 +1212,7 @@
 	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
 
 	mutex_lock(&dev->struct_mutex);
-	if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
 		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
 		 * aperture accordingly when using aliasing ppgtt. */
 		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
@@ -1207,8 +1222,10 @@
 		i915_gem_do_init(dev, 0, mappable_size, gtt_size);
 
 		ret = i915_gem_init_aliasing_ppgtt(dev);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
 			return ret;
+		}
 	} else {
 		/* Let GEM Manage all of the aperture.
 		 *
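/*
 * Editorial note on intel_enable_ppgtt() above: i915_enable_ppgtt is now a
 * tri-state int instead of a bool.  A value >= 0 forces PPGTT off (0) or on
 * (any positive value), while the default of -1 means "auto": enabled
 * everywhere except on gen6 (Sandybridge) when VT-d/IOMMU graphics mapping
 * is active, where it is disabled as a workaround.
 */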
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1a7559b..ae8a64f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -64,9 +64,13 @@
 		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
 int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
 MODULE_PARM_DESC(i915_enable_rc6,
-		"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
+		"Enable power-saving render C-state 6. "
+		"Different stages can be selected via bitmask values "
+		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+		"default: -1 (use per-chip default)");
 
 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -103,8 +107,8 @@
 		"WARNING: Disabling this can cause system wide hangs. "
 		"(default: true)");
 
-bool i915_enable_ppgtt __read_mostly = 1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
+int i915_enable_ppgtt __read_mostly = -1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
 MODULE_PARM_DESC(i915_enable_ppgtt,
 		"Enable PPGTT (default: true)");
 
@@ -292,6 +296,7 @@
 	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
 	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
 	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
 	{0, 0, 0}
 };
 
@@ -533,7 +538,9 @@
 		drm_irq_install(dev);
 
 		/* Resume the modeset for every activated CRTC */
+		mutex_lock(&dev->mode_config.mutex);
 		drm_helper_resume_force_mode(dev);
+		mutex_unlock(&dev->mode_config.mutex);
 
 		if (IS_IRONLAKE_M(dev))
 			ironlake_enable_rc6(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f19f5..5fabc6c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1053,6 +1053,27 @@
 
 #include "i915_trace.h"
 
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage.  This
+ * stage is entered automatically when the GPU is idle and RC6 support is
+ * enabled, and as soon as a new workload arises the GPU wakes up again.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ in
+ * the latency required to enter and leave RC6, and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6 and
+ * RC6pp is the deepest RC6.  Their hardware support varies with the GPU,
+ * BIOS, chipset and platform.  RC6 is usually the safest one and the one
+ * which brings the most power savings; deeper states save more power, but
+ * require higher latency to switch into and to wake up from.
+ */
+#define INTEL_RC6_ENABLE			(1<<0)
+#define INTEL_RC6p_ENABLE			(1<<1)
+#define INTEL_RC6pp_ENABLE			(1<<2)
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
@@ -1065,7 +1086,7 @@
 extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
-extern bool i915_enable_ppgtt __read_mostly;
+extern int i915_enable_ppgtt __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
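/*
 * Illustrative sketch (not part of this patch; the helper name is made up):
 * how a bitmask passed via the i915_enable_rc6 module parameter maps onto
 * the flags defined above, mirroring the logic added to gen6_enable_rps()
 * later in this series.  For example, 3 enables RC6 and deep RC6 (RC6p),
 * 7 enables all three states, and -1 keeps the per-chip default.
 */
static u32 rc6_param_to_mask(int param)
{
	u32 mask = 0;

	if (param & INTEL_RC6_ENABLE)
		mask |= GEN6_RC_CTL_RC6_ENABLE;
	if (param & INTEL_RC6p_ENABLE)
		mask |= GEN6_RC_CTL_RC6p_ENABLE;
	if (param & INTEL_RC6pp_ENABLE)
		mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	return mask;
}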
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f441f5..0e3c6ac 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1472,16 +1472,19 @@
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_rendering_seqno = seqno;
+
 	if (obj->fenced_gpu_access) {
-		struct drm_i915_fence_reg *reg;
-
-		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
-
 		obj->last_fenced_seqno = seqno;
 		obj->last_fenced_ring = ring;
 
-		reg = &dev_priv->fence_regs[obj->fence_reg];
-		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+		/* Bump MRU to take account of the delayed flush */
+		if (obj->fence_reg != I915_FENCE_REG_NONE) {
+			struct drm_i915_fence_reg *reg;
+
+			reg = &dev_priv->fence_regs[obj->fence_reg];
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+		}
 	}
 }
 
@@ -1490,6 +1493,7 @@
 {
 	list_del_init(&obj->ring_list);
 	obj->last_rendering_seqno = 0;
+	obj->last_fenced_seqno = 0;
 }
 
 static void
@@ -1518,6 +1522,7 @@
 	BUG_ON(!list_empty(&obj->gpu_write_list));
 	BUG_ON(!obj->active);
 	obj->ring = NULL;
+	obj->last_fenced_ring = NULL;
 
 	i915_gem_object_move_off_active(obj);
 	obj->fenced_gpu_access = false;
@@ -3754,12 +3759,32 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t pd_offset;
 	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	uint32_t __iomem *pd_addr;
+	uint32_t pd_entry;
 	int i;
 
 	if (!dev_priv->mm.aliasing_ppgtt)
 		return;
 
-	pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
+
+	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
+
+		if (dev_priv->mm.gtt->needs_dmar)
+			pt_addr = ppgtt->pt_dma_addr[i];
+		else
+			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+		pd_entry |= GEN6_PDE_VALID;
+
+		writel(pd_entry, pd_addr + i);
+	}
+	readl(pd_addr);
+
+	pd_offset = ppgtt->pd_offset;
 	pd_offset /= 64; /* in cachelines, */
 	pd_offset <<= 16;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 81687af..f51a696 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -498,8 +498,8 @@
 				if (ret)
 					goto err_unpin;
 			}
+			obj->pending_fenced_gpu_access = true;
 		}
-		obj->pending_fenced_gpu_access = need_fence;
 	}
 
 	entry->offset = obj->gtt_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2eacd78..a135c61 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -65,9 +65,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt;
-	uint32_t pd_entry;
 	unsigned first_pd_entry_in_global_pt;
-	uint32_t __iomem *pd_addr;
 	int i;
 	int ret = -ENOMEM;
 
@@ -100,7 +98,6 @@
 			goto err_pt_alloc;
 	}
 
-	pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
 		if (dev_priv->mm.gtt->needs_dmar) {
@@ -117,13 +114,7 @@
 			ppgtt->pt_dma_addr[i] = pt_addr;
 		} else
 			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
-		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
-		pd_entry |= GEN6_PDE_VALID;
-
-		writel(pd_entry, pd_addr + i);
 	}
-	readl(pd_addr);
 
 	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3886cf0..b4bb1ef 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2385,6 +2385,7 @@
 #define   PIPECONF_DISABLE	0
 #define   PIPECONF_DOUBLE_WIDE	(1<<30)
 #define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
 #define   PIPECONF_SINGLE_WIDE	0
 #define   PIPECONF_PIPE_UNLOCKED 0
 #define   PIPECONF_PIPE_LOCKED	(1<<25)
@@ -3727,6 +3728,9 @@
 #define  GT_FIFO_FREE_ENTRIES			0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES		20
 
+#define GEN6_UCGCTL1				0x9400
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+
 #define GEN6_UCGCTL2				0x9404
 # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13)
 # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 8168d8f..b48fc2a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@
  *    Eric Anholt <eric@anholt.net>
  *
  */
+#include <linux/dmi.h>
 #include <drm/drm_dp_helper.h>
 #include "drmP.h"
 #include "drm.h"
@@ -621,6 +622,26 @@
 	dev_priv->edp.bpp = 18;
 }
 
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+	DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+		      "VBIOS ROM for %s\n",
+		      id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+	{
+		.callback = intel_no_opregion_vbt_callback,
+		.ident = "ThinkCentre A57",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+		},
+	},
+	{ }
+};
+
 /**
  * intel_parse_bios - find VBT and initialize settings from the BIOS
  * @dev: DRM device
@@ -641,7 +662,7 @@
 	init_vbt_defaults(dev_priv);
 
 	/* XXX Should this validation be moved to intel_opregion.c? */
-	if (dev_priv->opregion.vbt) {
+	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
 		struct vbt_header *vbt = dev_priv->opregion.vbt;
 		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
 			DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d514719..bae38ac 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2245,6 +2245,33 @@
 }
 
 static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	bool was_interruptible = dev_priv->mm.interruptible;
+	int ret;
+
+	wait_event(dev_priv->pending_flip_queue,
+		   atomic_read(&dev_priv->mm.wedged) ||
+		   atomic_read(&obj->pending_flip) == 0);
+
+	/* Big Hammer, we also need to ensure that any pending
+	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+	 * current scanout is retired before unpinning the old
+	 * framebuffer.
+	 *
+	 * This should only fail upon a hung GPU, in which case we
+	 * can safely continue.
+	 */
+	dev_priv->mm.interruptible = false;
+	ret = i915_gem_object_finish_gpu(obj);
+	dev_priv->mm.interruptible = was_interruptible;
+
+	return ret;
+}
+
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
@@ -2282,25 +2309,8 @@
 		return ret;
 	}
 
-	if (old_fb) {
-		struct drm_i915_private *dev_priv = dev->dev_private;
-		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-
-		wait_event(dev_priv->pending_flip_queue,
-			   atomic_read(&dev_priv->mm.wedged) ||
-			   atomic_read(&obj->pending_flip) == 0);
-
-		/* Big Hammer, we also need to ensure that any pending
-		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-		 * current scanout is retired before unpinning the old
-		 * framebuffer.
-		 *
-		 * This should only fail upon a hung GPU, in which case we
-		 * can safely continue.
-		 */
-		ret = i915_gem_object_finish_gpu(obj);
-		(void) ret;
-	}
+	if (old_fb)
+		intel_finish_fb(old_fb);
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
 					 LEAVE_ATOMIC_MODE_SET);
@@ -3371,6 +3381,23 @@
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_device *dev = crtc->dev;
 
+	/* Flush any pending WAITs before we disable the pipe. Note that
+	 * we need to drop the struct_mutex in order to acquire it again
+	 * during the lowlevel dpms routines around a couple of the
+	 * operations. It looks neither trivial nor desirable to move
+	 * that locking higher. So instead we leave a window for the
+	 * submission of further commands on the fb before we can actually
+	 * disable it. This race with userspace exists anyway, and we can
+	 * only rely on the pipe being disabled by userspace after it
+	 * receives the hotplug notification and has flushed any pending
+	 * batches.
+	 */
+	if (crtc->fb) {
+		mutex_lock(&dev->struct_mutex);
+		intel_finish_fb(crtc->fb);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
 	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
@@ -5539,7 +5566,8 @@
 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
 			DRM_DEBUG_KMS("Using SSC on panel\n");
 			temp |= DREF_SSC1_ENABLE;
-		}
+		} else
+			temp &= ~DREF_SSC1_ENABLE;
 
 		/* Get SSC going before enabling the outputs */
 		I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -7580,6 +7608,12 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg, val;
 
+	/* Clear any frame start delays used for debugging left by the BIOS */
+	for_each_pipe(pipe) {
+		reg = PIPECONF(pipe);
+		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+	}
+
 	if (HAS_PCH_SPLIT(dev))
 		return;
 
@@ -8215,7 +8249,7 @@
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-static bool intel_enable_rc6(struct drm_device *dev)
+static int intel_enable_rc6(struct drm_device *dev)
 {
 	/*
 	 * Respect the kernel parameter if it is set
@@ -8233,11 +8267,11 @@
 	 * Disable rc6 on Sandybridge
 	 */
 	if (INTEL_INFO(dev)->gen == 6) {
-		DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
-		return 0;
+		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+		return INTEL_RC6_ENABLE;
 	}
-	DRM_DEBUG_DRIVER("RC6 enabled\n");
-	return 1;
+	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -8247,6 +8281,7 @@
 	u32 pcu_mbox, rc6_mask = 0;
 	u32 gtfifodbg;
 	int cur_freq, min_freq, max_freq;
+	int rc6_mode;
 	int i;
 
 	/* Here begins a magic sequence of register writes to enable
@@ -8284,9 +8319,20 @@
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-	if (intel_enable_rc6(dev_priv->dev))
-		rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
-			((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+	rc6_mode = intel_enable_rc6(dev_priv->dev);
+	if (rc6_mode & INTEL_RC6_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+	if (rc6_mode & INTEL_RC6p_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+	if (rc6_mode & INTEL_RC6pp_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
 
 	I915_WRITE(GEN6_RC_CONTROL,
 		   rc6_mask |
@@ -8510,6 +8556,10 @@
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
+	I915_WRITE(GEN6_UCGCTL1,
+		   I915_READ(GEN6_UCGCTL1) |
+		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
+
 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 	 * gating disable must be set.  Failure to set it results in
 	 * flickering pixels due to Z write ordering failures after
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 110552f..4b63791 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -219,14 +219,38 @@
 	return (max_link_clock * max_lanes * 8) / 10;
 }
 
+static bool
+intel_dp_adjust_dithering(struct intel_dp *intel_dp,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_rate, mode_rate;
+
+	mode_rate = intel_dp_link_required(mode->clock, 24);
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+	if (mode_rate > max_rate) {
+		mode_rate = intel_dp_link_required(mode->clock, 18);
+		if (mode_rate > max_rate)
+			return false;
+
+		if (adjusted_mode)
+			adjusted_mode->private_flags
+				|= INTEL_MODE_DP_FORCE_6BPC;
+
+		return true;
+	}
+
+	return true;
+}
+
 static int
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
-	int max_lanes = intel_dp_max_lane_count(intel_dp);
-	int max_rate, mode_rate;
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
 		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -236,16 +260,8 @@
 			return MODE_PANEL;
 	}
 
-	mode_rate = intel_dp_link_required(mode->clock, 24);
-	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-
-	if (mode_rate > max_rate) {
-			mode_rate = intel_dp_link_required(mode->clock, 18);
-			if (mode_rate > max_rate)
-				return MODE_CLOCK_HIGH;
-			else
-				mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
-	}
+	if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+		return MODE_CLOCK_HIGH;
 
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
@@ -672,7 +688,7 @@
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
-	int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+	int bpp;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -686,6 +702,11 @@
 		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
+	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+		return false;
+
+	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 601c86e..8fdc957 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -390,7 +390,7 @@
 		bus->has_gpio = intel_gpio_setup(bus, i);
 
 		/* XXX force bit banging until GMBUS is fully debugged */
-		if (bus->has_gpio && IS_GEN2(dev))
+		if (bus->has_gpio)
 			bus->force_bit = true;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c5c0973..95db2e9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -755,6 +755,14 @@
 			DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
 		},
 	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "MSI Wind Box DC500",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+			DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+		},
+	},
 
 	{ }	/* terminating entry */
 };
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fc66af6..f75806e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -626,7 +626,7 @@
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
-	if (IS_GEN7(dev))
+	if (IS_GEN6(dev) || IS_GEN7(dev))
 		intel_ring_get_active_head(ring);
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
@@ -1038,7 +1038,7 @@
 	 * of the buffer.
 	 */
 	ring->effective_size = ring->size;
-	if (IS_I830(ring->dev))
+	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7aa0450..e90dfb6 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -95,7 +95,6 @@
 	/* must disable */
 	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
 	sprctl |= SPRITE_ENABLE;
-	sprctl |= SPRITE_DEST_KEY;
 
 	/* Sizes are 0 based */
 	src_w--;
@@ -411,6 +410,9 @@
 
 	old_obj = intel_plane->obj;
 
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
 	/* Pipe must be running... */
 	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ca16399..97a8126 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -13,6 +13,7 @@
 	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
 	select ACPI_WMI if ACPI
 	select MXM_WMI if ACPI
+	select POWER_SUPPLY
 	help
 	  Choose this option for open-source nVidia support.
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 637afe7..80963d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -177,14 +177,15 @@
 
 	if (!pci_enable_rom(pdev)) {
 		void __iomem *rom = pci_map_rom(pdev, &length);
-		if (rom) {
+		if (rom && length) {
 			bios->data = kmalloc(length, GFP_KERNEL);
 			if (bios->data) {
 				memcpy_fromio(bios->data, rom, length);
 				bios->length = length;
 			}
-			pci_unmap_rom(pdev, rom);
 		}
+		if (rom)
+			pci_unmap_rom(pdev, rom);
 
 		pci_disable_rom(pdev);
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 44e6416..846afb0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -436,11 +436,11 @@
 	}
 
 	if (dev_priv->card_type < NV_C0) {
-		init->subchan[0].handle = NvSw;
-		init->subchan[0].grclass = NV_SW;
-		init->nr_subchan = 1;
-	} else {
-		init->nr_subchan = 0;
+		init->subchan[0].handle = 0x00000000;
+		init->subchan[0].grclass = 0x0000;
+		init->subchan[1].handle = NvSw;
+		init->subchan[1].grclass = NV_SW;
+		init->nr_subchan = 2;
 	}
 
 	/* Named memory object area */
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index bcf0fd9..23d4edf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,8 +48,8 @@
 
 /* Hardcoded object assignments to subchannels (subchannel id). */
 enum {
-	NvSubSw		= 0,
-	NvSubM2MF	= 1,
+	NvSubM2MF	= 0,
+	NvSubSw		= 1,
 	NvSub2D		= 2,
 	NvSubCtxSurf2D  = 2,
 	NvSubGdiRect    = 3,
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4886b3..c2a8511 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -642,7 +642,7 @@
 		OUT_RING  (chan, chan->vram_handle);
 		OUT_RING  (chan, chan->gart_handle);
 	} else
-	if (dev_priv->card_type <= NV_C0) {
+	if (dev_priv->card_type <= NV_D0) {
 		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
 		if (ret)
 			goto error;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d1bd239..5ce9bf5 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1306,8 +1306,11 @@
 
 int atom_asic_init(struct atom_context *ctx)
 {
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
 	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
 	uint32_t ps[16];
+	int ret;
+
 	memset(ps, 0, 64);
 
 	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
@@ -1317,7 +1320,17 @@
 
 	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
 		return 1;
-	return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	if (ret)
+		return ret;
+
+	memset(ps, 0, 64);
+
+	if (rdev->family < CHIP_R600) {
+		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+	}
+	return ret;
 }
 
 void atom_destroy(struct atom_context *ctx)
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 93cfe20..25fea63 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -44,6 +44,7 @@
 #define ATOM_CMD_SETSCLK	0x0A
 #define ATOM_CMD_SETMCLK	0x0B
 #define ATOM_CMD_SETPCLK	0x0C
+#define ATOM_CMD_SPDFANCNTL	0x39
 
 #define ATOM_DATA_FWI_PTR	0xC
 #define ATOM_DATA_IIO_PTR	0x32
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e607c4d..2d39f99 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -230,6 +230,10 @@
 	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
 		return;
 
+	/* some R4xx chips have the wrong frev */
+	if (rdev->family <= CHIP_RV410)
+		frev = 1;
+
 	switch (frev) {
 	case 1:
 		switch (crev) {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 81801c1..fe33d35 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2553,7 +2553,7 @@
 	 * or the chip could hang on a subsequent access
 	 */
 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
-		udelay(5000);
+		mdelay(5);
 	}
 
 	/* This function is required to workaround a hardware bug in some (all?)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 391bd26..de71243 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2839,7 +2839,7 @@
 		/* r7xx asics need to soft reset RLC before halting */
 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
 		RREG32(SRBM_SOFT_RESET);
-		udelay(15000);
+		mdelay(15);
 		WREG32(SRBM_SOFT_RESET, 0);
 		RREG32(SRBM_SOFT_RESET);
 	}
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 84c5462..75ed17c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -407,7 +407,7 @@
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 	fw_data = (const __be32 *)dev_priv->me_fw->data;
@@ -500,7 +500,7 @@
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
@@ -1797,7 +1797,7 @@
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 6ae0c75..9c6b29a 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -633,7 +633,7 @@
 				tmp &= ~(R300_SCLK_FORCE_VAP);
 				tmp |= RADEON_SCLK_FORCE_CP;
 				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-				udelay(15000);
+				mdelay(15);
 
 				tmp = RREG32_PLL(R300_SCLK_CNTL2);
 				tmp &= ~(R300_SCLK_FORCE_TCL |
@@ -651,12 +651,12 @@
 			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
 				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
 			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
 			tmp |= RADEON_SCLK_DYN_START_CNTL;
 			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
 			   to lockup randomly, leave them as set by BIOS.
@@ -696,7 +696,7 @@
 					tmp |= RADEON_SCLK_MORE_FORCEON;
 				}
 				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
-				udelay(15000);
+				mdelay(15);
 			}
 
 			/* RV200::A11 A12, RV250::A11 A12 */
@@ -709,7 +709,7 @@
 				tmp |= RADEON_TCL_BYPASS_DISABLE;
 				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
 			}
-			udelay(15000);
+			mdelay(15);
 
 			/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
 			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
@@ -722,14 +722,14 @@
 				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
 			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
 				RADEON_PIXCLK_DAC_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 		}
 	} else {
 		/* Turn everything OFF (ForceON to everything) */
@@ -861,7 +861,7 @@
 			}
 			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
 
-			udelay(16000);
+			mdelay(16);
 
 			if ((rdev->family == CHIP_R300) ||
 			    (rdev->family == CHIP_R350)) {
@@ -870,7 +870,7 @@
 					R300_SCLK_FORCE_GA |
 					R300_SCLK_FORCE_CBA);
 				WREG32_PLL(R300_SCLK_CNTL2, tmp);
-				udelay(16000);
+				mdelay(16);
 			}
 
 			if (rdev->flags & RADEON_IS_IGP) {
@@ -878,7 +878,7 @@
 				tmp &= ~(RADEON_FORCEON_MCLKA |
 					 RADEON_FORCEON_YCLKA);
 				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
-				udelay(16000);
+				mdelay(16);
 			}
 
 			if ((rdev->family == CHIP_RV200) ||
@@ -887,7 +887,7 @@
 				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
 				tmp |= RADEON_SCLK_MORE_FORCEON;
 				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
-				udelay(16000);
+				mdelay(16);
 			}
 
 			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
@@ -900,7 +900,7 @@
 				 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
-			udelay(16000);
+			mdelay(16);
 
 			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
 			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 81fc100..2cad9fd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2845,7 +2845,7 @@
 					case 4:
 						val = RBIOS16(index);
 						index += 2;
-						udelay(val * 1000);
+						mdelay(val);
 						break;
 					case 6:
 						slave_addr = id & 0xff;
@@ -3044,7 +3044,7 @@
 					udelay(150);
 					break;
 				case 2:
-					udelay(1000);
+					mdelay(1);
 					break;
 				case 3:
 					while (tmp--) {
@@ -3075,13 +3075,13 @@
 						/*mclk_cntl |= 0x00001111;*//* ??? */
 						WREG32_PLL(RADEON_MCLK_CNTL,
 							   mclk_cntl);
-						udelay(10000);
+						mdelay(10);
 #endif
 						WREG32_PLL
 						    (RADEON_CLK_PWRMGT_CNTL,
 						     tmp &
 						     ~RADEON_CG_NO1_DEBUG_0);
-						udelay(10000);
+						mdelay(10);
 					}
 					break;
 				default:
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 85bcfc8..3edec1c 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -900,6 +900,10 @@
 	struct radeon_i2c_chan *i2c;
 	int ret;
 
+	/* don't add the mm_i2c bus unless hw_i2c is enabled */
+	if (rec->mm_i2c && (radeon_hw_i2c == 0))
+		return NULL;
+
 	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
 	if (i2c == NULL)
 		return NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2f46e0c8..42db254 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -88,7 +88,7 @@
 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
 		lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
 		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
-		udelay(1000);
+		mdelay(1);
 
 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
 		lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
@@ -101,7 +101,7 @@
 				  (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
 		if (is_mac)
 			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
@@ -118,10 +118,10 @@
 			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
 		}
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		break;
 	}
 
@@ -656,7 +656,7 @@
 
 	WREG32(RADEON_DAC_MACRO_CNTL, tmp);
 
-	udelay(2000);
+	mdelay(2);
 
 	if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
 		found = connector_status_connected;
@@ -1499,7 +1499,7 @@
 	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
 	WREG32(RADEON_DAC_CNTL2, tmp);
 
-	udelay(10000);
+	mdelay(10);
 
 	if (ASIC_IS_R300(rdev)) {
 		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6f70158..df6a4db 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -241,7 +241,8 @@
 				domain_start = bo->rdev->mc.vram_start;
 			else
 				domain_start = bo->rdev->mc.gtt_start;
-			WARN_ON_ONCE((*gpu_addr - domain_start) > max_offset);
+			WARN_ON_ONCE(max_offset <
+				     (radeon_bo_gpu_offset(bo) - domain_start));
 		}
 
 		return 0;
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 031aaaf..b6d8608 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -988,7 +988,7 @@
 	 * for locking on FreeBSD.
 	 */
 	if (cmdbuf->size) {
-		kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL);
+		kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
 		if (kcmd_addr == NULL)
 			return -ENOMEM;
 
@@ -1015,8 +1015,8 @@
 		cmdbuf->vb_addr = kvb_addr;
 	}
 	if (cmdbuf->nbox) {
-		kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
-				    GFP_KERNEL);
+		kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
+					  GFP_KERNEL);
 		if (kbox_addr == NULL) {
 			ret = -ENOMEM;
 			goto done;
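/*
 * Background on the kmalloc_array() conversions above (for reference):
 * kmalloc_array(n, size, flags) behaves like kmalloc(n * size, flags) but
 * returns NULL when n * size would overflow, so oversized cmdbuf->size or
 * cmdbuf->nbox values coming from userspace can no longer wrap the
 * allocation length.
 */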
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 5340c5f..5367390 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -47,7 +47,7 @@
 static const struct file_operations udl_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
-	.mmap = drm_gem_mmap,
+	.mmap = udl_drm_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.unlocked_ioctl	= drm_ioctl,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1612954..96820d0 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -121,6 +121,7 @@
 
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 852642d..92f19ef 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -71,6 +71,20 @@
 	return drm_gem_handle_delete(file, handle);
 }
 
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	return ret;
+}
+
 int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
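/*
 * Rationale for udl_drm_gem_mmap() above (a sketch of the reasoning): the
 * common drm_gem_mmap() leaves the VMA marked VM_PFNMAP, but udl backs its
 * GEM objects with ordinary pages and inserts them from udl_gem_fault() via
 * vm_insert_page(), which does not accept VM_PFNMAP areas.  Clearing
 * VM_PFNMAP and setting VM_MIXEDMAP keeps the common GEM mmap path while
 * allowing page-backed insertion.
 */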
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 12f9777..45c3433 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -1525,12 +1525,6 @@
 /*
  * The "eeprom" file
  */
-static int picolcd_debug_eeprom_open(struct inode *i, struct file *f)
-{
-	f->private_data = i->i_private;
-	return 0;
-}
-
 static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
 		size_t s, loff_t *off)
 {
@@ -1618,7 +1612,7 @@
  */
 static const struct file_operations picolcd_debug_eeprom_fops = {
 	.owner    = THIS_MODULE,
-	.open     = picolcd_debug_eeprom_open,
+	.open     = simple_open,
 	.read     = picolcd_debug_eeprom_read,
 	.write    = picolcd_debug_eeprom_write,
 	.llseek   = generic_file_llseek,
@@ -1627,12 +1621,6 @@
 /*
  * The "flash" file
  */
-static int picolcd_debug_flash_open(struct inode *i, struct file *f)
-{
-	f->private_data = i->i_private;
-	return 0;
-}
-
 /* record a flash address to buf (bounds check to be done by caller) */
 static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off)
 {
@@ -1817,7 +1805,7 @@
  */
 static const struct file_operations picolcd_debug_flash_fops = {
 	.owner    = THIS_MODULE,
-	.open     = picolcd_debug_flash_open,
+	.open     = simple_open,
 	.read     = picolcd_debug_flash_read,
 	.write    = picolcd_debug_flash_write,
 	.llseek   = generic_file_llseek,
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index 17dabc1..eec3291 100644
--- a/drivers/hid/hid-wiimote-debug.c
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -23,12 +23,6 @@
 	struct dentry *drm;
 };
 
-static int wiidebug_eeprom_open(struct inode *i, struct file *f)
-{
-	f->private_data = i->i_private;
-	return 0;
-}
-
 static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
 								loff_t *off)
 {
@@ -83,7 +77,7 @@
 
 static const struct file_operations wiidebug_eeprom_fops = {
 	.owner = THIS_MODULE,
-	.open = wiidebug_eeprom_open,
+	.open = simple_open,
 	.read = wiidebug_eeprom_read,
 	.llseek = generic_file_llseek,
 };
diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig
new file mode 100644
index 0000000..d94e38d
--- /dev/null
+++ b/drivers/hsi/Kconfig
@@ -0,0 +1,19 @@
+#
+# HSI driver configuration
+#
+menuconfig HSI
+	tristate "HSI support"
+	---help---
+	  The "High speed synchronous Serial Interface" is
+	  a synchronous serial interface used mainly to connect
+	  application engines and cellular modems.
+
+if HSI
+
+config HSI_BOARDINFO
+	bool
+	default y
+
+source "drivers/hsi/clients/Kconfig"
+
+endif # HSI
diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile
new file mode 100644
index 0000000..9d5d33f
--- /dev/null
+++ b/drivers/hsi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for HSI
+#
+obj-$(CONFIG_HSI_BOARDINFO)	+= hsi_boardinfo.o
+obj-$(CONFIG_HSI)		+= hsi.o
+obj-y				+= clients/
diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
new file mode 100644
index 0000000..3bacd27
--- /dev/null
+++ b/drivers/hsi/clients/Kconfig
@@ -0,0 +1,13 @@
+#
+# HSI clients configuration
+#
+
+comment "HSI clients"
+
+config HSI_CHAR
+	tristate "HSI/SSI character driver"
+	depends on HSI
+	---help---
+	  If you say Y here, you will enable the HSI/SSI character driver.
+	  This driver provides a simple character device interface for
+	  serial communication with the cellular modem over the HSI/SSI bus.
diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile
new file mode 100644
index 0000000..327c0e2
--- /dev/null
+++ b/drivers/hsi/clients/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for HSI clients
+#
+
+obj-$(CONFIG_HSI_CHAR)	+= hsi_char.o
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
new file mode 100644
index 0000000..88a050d
--- /dev/null
+++ b/drivers/hsi/clients/hsi_char.c
@@ -0,0 +1,802 @@
+/*
+ * HSI character device driver, implements the character device
+ * interface.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Andras Domokos <andras.domokos@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/ioctl.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/scatterlist.h>
+#include <linux/stat.h>
+#include <linux/hsi/hsi.h>
+#include <linux/hsi/hsi_char.h>
+
+#define HSC_DEVS		16 /* Num of channels */
+#define HSC_MSGS		4
+
+#define HSC_RXBREAK		0
+
+#define HSC_ID_BITS		6
+#define HSC_PORT_ID_BITS	4
+#define HSC_ID_MASK		3
+#define HSC_PORT_ID_MASK	3
+#define HSC_CH_MASK		0xf
+
+/*
+ * We support up to 4 controllers that can have up to 4
+ * ports, which should currently be more than enough.
+ */
+#define HSC_BASEMINOR(id, port_id) \
+		((((id) & HSC_ID_MASK) << HSC_ID_BITS) | \
+		(((port_id) & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS))
+
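/*
 * Worked example (illustrative): HSC_BASEMINOR(1, 2) evaluates to
 * ((1 & 3) << 6) | ((2 & 3) << 4) = 0x60, and the HSC_DEVS = 16 channel
 * nodes of that port then occupy minors 0x60..0x6f, with HSC_CH_MASK
 * selecting the channel within the range.
 */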
+enum {
+	HSC_CH_OPEN,
+	HSC_CH_READ,
+	HSC_CH_WRITE,
+	HSC_CH_WLINE,
+};
+
+enum {
+	HSC_RX,
+	HSC_TX,
+};
+
+struct hsc_client_data;
+/**
+ * struct hsc_channel - hsi_char internal channel data
+ * @ch: channel number
+ * @flags: Keeps state of the channel (open/close, reading, writing)
+ * @free_msgs_list: List of free HSI messages/requests
+ * @rx_msgs_queue: List of pending RX requests
+ * @tx_msgs_queue: List of pending TX requests
+ * @lock: Serialize access to the lists
+ * @cl: reference to the associated hsi_client
+ * @cl_data: reference to the client data that this channel belongs to
+ * @rx_wait: RX requests wait queue
+ * @tx_wait: TX requests wait queue
+ */
+struct hsc_channel {
+	unsigned int		ch;
+	unsigned long		flags;
+	struct list_head	free_msgs_list;
+	struct list_head	rx_msgs_queue;
+	struct list_head	tx_msgs_queue;
+	spinlock_t		lock;
+	struct hsi_client	*cl;
+	struct hsc_client_data *cl_data;
+	wait_queue_head_t	rx_wait;
+	wait_queue_head_t	tx_wait;
+};
+
+/**
+ * struct hsc_client_data - hsi_char internal client data
+ * @cdev: Character device associated with the hsi_client
+ * @lock: Lock to serialize open/close access
+ * @flags: Keeps track of port state (rx hwbreak armed)
+ * @usecnt: Use count for claiming the HSI port (mutex protected)
+ * @cl: Reference to the HSI client
+ * @channels: Array of channels accessible by the client
+ */
+struct hsc_client_data {
+	struct cdev		cdev;
+	struct mutex		lock;
+	unsigned long		flags;
+	unsigned int		usecnt;
+	struct hsi_client	*cl;
+	struct hsc_channel	channels[HSC_DEVS];
+};
+
+/* Stores the major number dynamically allocated for hsi_char */
+static unsigned int hsc_major;
+/* Maximum buffer size that hsi_char will accept from userspace */
+static unsigned int max_data_size = 0x1000;
+module_param(max_data_size, uint, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
+
+static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg,
+							struct list_head *queue)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->lock, flags);
+	list_add_tail(&msg->link, queue);
+	spin_unlock_irqrestore(&channel->lock, flags);
+}
+
+static struct hsi_msg *hsc_get_first_msg(struct hsc_channel *channel,
+							struct list_head *queue)
+{
+	struct hsi_msg *msg = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->lock, flags);
+
+	if (list_empty(queue))
+		goto out;
+
+	msg = list_first_entry(queue, struct hsi_msg, link);
+	list_del(&msg->link);
+out:
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	return msg;
+}
+
+static inline void hsc_msg_free(struct hsi_msg *msg)
+{
+	kfree(sg_virt(msg->sgt.sgl));
+	hsi_free_msg(msg);
+}
+
+static void hsc_free_list(struct list_head *list)
+{
+	struct hsi_msg *msg, *tmp;
+
+	list_for_each_entry_safe(msg, tmp, list, link) {
+		list_del(&msg->link);
+		hsc_msg_free(msg);
+	}
+}
+
+static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l)
+{
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	spin_lock_irqsave(&channel->lock, flags);
+	list_splice_init(l, &list);
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	hsc_free_list(&list);
+}
+
+static inline struct hsi_msg *hsc_msg_alloc(unsigned int alloc_size)
+{
+	struct hsi_msg *msg;
+	void *buf;
+
+	msg = hsi_alloc_msg(1, GFP_KERNEL);
+	if (!msg)
+		goto out;
+	buf = kmalloc(alloc_size, GFP_KERNEL);
+	if (!buf) {
+		hsi_free_msg(msg);
+		goto out;
+	}
+	sg_init_one(msg->sgt.sgl, buf, alloc_size);
+	/* Ignore false positive due to sg pointer handling */
+	kmemleak_ignore(buf);
+
+	return msg;
+out:
+	return NULL;
+}
+
+static inline int hsc_msgs_alloc(struct hsc_channel *channel)
+{
+	struct hsi_msg *msg;
+	int i;
+
+	for (i = 0; i < HSC_MSGS; i++) {
+		msg = hsc_msg_alloc(max_data_size);
+		if (!msg)
+			goto out;
+		msg->channel = channel->ch;
+		list_add_tail(&msg->link, &channel->free_msgs_list);
+	}
+
+	return 0;
+out:
+	hsc_free_list(&channel->free_msgs_list);
+
+	return -ENOMEM;
+}
+
+static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg)
+{
+	return msg->sgt.sgl->length;
+}
+
+static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len)
+{
+	msg->sgt.sgl->length = len;
+}
+
+static void hsc_rx_completed(struct hsi_msg *msg)
+{
+	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+	struct hsc_channel *channel = cl_data->channels + msg->channel;
+
+	if (test_bit(HSC_CH_READ, &channel->flags)) {
+		hsc_add_tail(channel, msg, &channel->rx_msgs_queue);
+		wake_up(&channel->rx_wait);
+	} else {
+		hsc_add_tail(channel, msg, &channel->free_msgs_list);
+	}
+}
+
+static void hsc_rx_msg_destructor(struct hsi_msg *msg)
+{
+	msg->status = HSI_STATUS_ERROR;
+	hsc_msg_len_set(msg, 0);
+	hsc_rx_completed(msg);
+}
+
+static void hsc_tx_completed(struct hsi_msg *msg)
+{
+	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+	struct hsc_channel *channel = cl_data->channels + msg->channel;
+
+	if (test_bit(HSC_CH_WRITE, &channel->flags)) {
+		hsc_add_tail(channel, msg, &channel->tx_msgs_queue);
+		wake_up(&channel->tx_wait);
+	} else {
+		hsc_add_tail(channel, msg, &channel->free_msgs_list);
+	}
+}
+
+static void hsc_tx_msg_destructor(struct hsi_msg *msg)
+{
+	msg->status = HSI_STATUS_ERROR;
+	hsc_msg_len_set(msg, 0);
+	hsc_tx_completed(msg);
+}
+
+static void hsc_break_req_destructor(struct hsi_msg *msg)
+{
+	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+
+	hsi_free_msg(msg);
+	clear_bit(HSC_RXBREAK, &cl_data->flags);
+}
+
+static void hsc_break_received(struct hsi_msg *msg)
+{
+	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
+	struct hsc_channel *channel = cl_data->channels;
+	int i, ret;
+
+	/* Broadcast HWBREAK on all channels */
+	for (i = 0; i < HSC_DEVS; i++, channel++) {
+		struct hsi_msg *msg2;
+
+		if (!test_bit(HSC_CH_READ, &channel->flags))
+			continue;
+		msg2 = hsc_get_first_msg(channel, &channel->free_msgs_list);
+		if (!msg2)
+			continue;
+		clear_bit(HSC_CH_READ, &channel->flags);
+		hsc_msg_len_set(msg2, 0);
+		msg2->status = HSI_STATUS_COMPLETED;
+		hsc_add_tail(channel, msg2, &channel->rx_msgs_queue);
+		wake_up(&channel->rx_wait);
+	}
+	hsi_flush(msg->cl);
+	ret = hsi_async_read(msg->cl, msg);
+	if (ret < 0)
+		hsc_break_req_destructor(msg);
+}
+
+static int hsc_break_request(struct hsi_client *cl)
+{
+	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
+	struct hsi_msg *msg;
+	int ret;
+
+	if (test_and_set_bit(HSC_RXBREAK, &cl_data->flags))
+		return -EBUSY;
+
+	msg = hsi_alloc_msg(0, GFP_KERNEL);
+	if (!msg) {
+		clear_bit(HSC_RXBREAK, &cl_data->flags);
+		return -ENOMEM;
+	}
+	msg->break_frame = 1;
+	msg->complete = hsc_break_received;
+	msg->destructor = hsc_break_req_destructor;
+	ret = hsi_async_read(cl, msg);
+	if (ret < 0)
+		hsc_break_req_destructor(msg);
+
+	return ret;
+}
+
+static int hsc_break_send(struct hsi_client *cl)
+{
+	struct hsi_msg *msg;
+	int ret;
+
+	msg = hsi_alloc_msg(0, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+	msg->break_frame = 1;
+	msg->complete = hsi_free_msg;
+	msg->destructor = hsi_free_msg;
+	ret = hsi_async_write(cl, msg);
+	if (ret < 0)
+		hsi_free_msg(msg);
+
+	return ret;
+}
+
+static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc)
+{
+	struct hsi_config tmp;
+	int ret;
+
+	if ((rxc->mode != HSI_MODE_STREAM) && (rxc->mode != HSI_MODE_FRAME))
+		return -EINVAL;
+	if ((rxc->channels == 0) || (rxc->channels > HSC_DEVS))
+		return -EINVAL;
+	if (rxc->channels & (rxc->channels - 1))
+		return -EINVAL;
+	if ((rxc->flow != HSI_FLOW_SYNC) && (rxc->flow != HSI_FLOW_PIPE))
+		return -EINVAL;
+	tmp = cl->rx_cfg;
+	cl->rx_cfg.mode = rxc->mode;
+	cl->rx_cfg.channels = rxc->channels;
+	cl->rx_cfg.flow = rxc->flow;
+	ret = hsi_setup(cl);
+	if (ret < 0) {
+		cl->rx_cfg = tmp;
+		return ret;
+	}
+	if (rxc->mode == HSI_MODE_FRAME)
+		hsc_break_request(cl);
+
+	return ret;
+}
+
+static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc)
+{
+	rxc->mode = cl->rx_cfg.mode;
+	rxc->channels = cl->rx_cfg.channels;
+	rxc->flow = cl->rx_cfg.flow;
+}
+
+static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc)
+{
+	struct hsi_config tmp;
+	int ret;
+
+	if ((txc->mode != HSI_MODE_STREAM) && (txc->mode != HSI_MODE_FRAME))
+		return -EINVAL;
+	if ((txc->channels == 0) || (txc->channels > HSC_DEVS))
+		return -EINVAL;
+	if (txc->channels & (txc->channels - 1))
+		return -EINVAL;
+	if ((txc->arb_mode != HSI_ARB_RR) && (txc->arb_mode != HSI_ARB_PRIO))
+		return -EINVAL;
+	tmp = cl->tx_cfg;
+	cl->tx_cfg.mode = txc->mode;
+	cl->tx_cfg.channels = txc->channels;
+	cl->tx_cfg.speed = txc->speed;
+	cl->tx_cfg.arb_mode = txc->arb_mode;
+	ret = hsi_setup(cl);
+	if (ret < 0) {
+		cl->tx_cfg = tmp;
+		return ret;
+	}
+
+	return ret;
+}
+
+static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc)
+{
+	txc->mode = cl->tx_cfg.mode;
+	txc->channels = cl->tx_cfg.channels;
+	txc->speed = cl->tx_cfg.speed;
+	txc->arb_mode = cl->tx_cfg.arb_mode;
+}
+
+static ssize_t hsc_read(struct file *file, char __user *buf, size_t len,
+						loff_t *ppos __maybe_unused)
+{
+	struct hsc_channel *channel = file->private_data;
+	struct hsi_msg *msg;
+	ssize_t ret;
+
+	if (len == 0)
+		return 0;
+	if (!IS_ALIGNED(len, sizeof(u32)))
+		return -EINVAL;
+	if (len > max_data_size)
+		len = max_data_size;
+	if (channel->ch >= channel->cl->rx_cfg.channels)
+		return -ECHRNG;
+	if (test_and_set_bit(HSC_CH_READ, &channel->flags))
+		return -EBUSY;
+	msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
+	if (!msg) {
+		ret = -ENOSPC;
+		goto out;
+	}
+	hsc_msg_len_set(msg, len);
+	msg->complete = hsc_rx_completed;
+	msg->destructor = hsc_rx_msg_destructor;
+	ret = hsi_async_read(channel->cl, msg);
+	if (ret < 0) {
+		hsc_add_tail(channel, msg, &channel->free_msgs_list);
+		goto out;
+	}
+
+	ret = wait_event_interruptible(channel->rx_wait,
+					!list_empty(&channel->rx_msgs_queue));
+	if (ret < 0) {
+		clear_bit(HSC_CH_READ, &channel->flags);
+		hsi_flush(channel->cl);
+		return -EINTR;
+	}
+
+	msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue);
+	if (msg) {
+		if (msg->status != HSI_STATUS_ERROR) {
+			ret = copy_to_user((void __user *)buf,
+			sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg));
+			if (ret)
+				ret = -EFAULT;
+			else
+				ret = hsc_msg_len_get(msg);
+		} else {
+			ret = -EIO;
+		}
+		hsc_add_tail(channel, msg, &channel->free_msgs_list);
+	}
+out:
+	clear_bit(HSC_CH_READ, &channel->flags);
+
+	return ret;
+}
+
+static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len,
+						loff_t *ppos __maybe_unused)
+{
+	struct hsc_channel *channel = file->private_data;
+	struct hsi_msg *msg;
+	ssize_t ret;
+
+	if ((len == 0) || !IS_ALIGNED(len, sizeof(u32)))
+		return -EINVAL;
+	if (len > max_data_size)
+		len = max_data_size;
+	if (channel->ch >= channel->cl->tx_cfg.channels)
+		return -ECHRNG;
+	if (test_and_set_bit(HSC_CH_WRITE, &channel->flags))
+		return -EBUSY;
+	msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
+	if (!msg) {
+		clear_bit(HSC_CH_WRITE, &channel->flags);
+		return -ENOSPC;
+	}
+	if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	hsc_msg_len_set(msg, len);
+	msg->complete = hsc_tx_completed;
+	msg->destructor = hsc_tx_msg_destructor;
+	ret = hsi_async_write(channel->cl, msg);
+	if (ret < 0)
+		goto out;
+
+	ret = wait_event_interruptible(channel->tx_wait,
+					!list_empty(&channel->tx_msgs_queue));
+	if (ret < 0) {
+		clear_bit(HSC_CH_WRITE, &channel->flags);
+		hsi_flush(channel->cl);
+		return -EINTR;
+	}
+
+	msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue);
+	if (msg) {
+		if (msg->status == HSI_STATUS_ERROR)
+			ret = -EIO;
+		else
+			ret = hsc_msg_len_get(msg);
+
+		hsc_add_tail(channel, msg, &channel->free_msgs_list);
+	}
+out:
+	clear_bit(HSC_CH_WRITE, &channel->flags);
+
+	return ret;
+}
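/*
 * Userspace usage sketch (illustrative only; the device node name below is a
 * hypothetical example, since hsi_char allocates its major number
 * dynamically and the actual node name depends on how it is exposed):
 *
 *	int fd = open("/dev/hsi_char_0", O_RDWR);
 *	uint32_t word;
 *	ssize_t n = read(fd, &word, sizeof(word)); /* blocks for one frame */
 *
 * As enforced by hsc_read()/hsc_write() above, transfer lengths must be
 * 32-bit aligned and are capped at max_data_size.
 */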
+
+static long hsc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct hsc_channel *channel = file->private_data;
+	unsigned int state;
+	struct hsc_rx_config rxc;
+	struct hsc_tx_config txc;
+	long ret = 0;
+
+	switch (cmd) {
+	case HSC_RESET:
+		hsi_flush(channel->cl);
+		break;
+	case HSC_SET_PM:
+		if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
+			return -EFAULT;
+		if (state == HSC_PM_DISABLE) {
+			if (test_and_set_bit(HSC_CH_WLINE, &channel->flags))
+				return -EINVAL;
+			ret = hsi_start_tx(channel->cl);
+		} else if (state == HSC_PM_ENABLE) {
+			if (!test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
+				return -EINVAL;
+			ret = hsi_stop_tx(channel->cl);
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case HSC_SEND_BREAK:
+		return hsc_break_send(channel->cl);
+	case HSC_SET_RX:
+		if (copy_from_user(&rxc, (void __user *)arg, sizeof(rxc)))
+			return -EFAULT;
+		return hsc_rx_set(channel->cl, &rxc);
+	case HSC_GET_RX:
+		hsc_rx_get(channel->cl, &rxc);
+		if (copy_to_user((void __user *)arg, &rxc, sizeof(rxc)))
+			return -EFAULT;
+		break;
+	case HSC_SET_TX:
+		if (copy_from_user(&txc, (void __user *)arg, sizeof(txc)))
+			return -EFAULT;
+		return hsc_tx_set(channel->cl, &txc);
+	case HSC_GET_TX:
+		hsc_tx_get(channel->cl, &txc);
+		if (copy_to_user((void __user *)arg, &txc, sizeof(txc)))
+			return -EFAULT;
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+static inline void __hsc_port_release(struct hsc_client_data *cl_data)
+{
+	BUG_ON(cl_data->usecnt == 0);
+
+	if (--cl_data->usecnt == 0) {
+		hsi_flush(cl_data->cl);
+		hsi_release_port(cl_data->cl);
+	}
+}
+
+static int hsc_open(struct inode *inode, struct file *file)
+{
+	struct hsc_client_data *cl_data;
+	struct hsc_channel *channel;
+	int ret = 0;
+
+	pr_debug("open, minor = %d\n", iminor(inode));
+
+	cl_data = container_of(inode->i_cdev, struct hsc_client_data, cdev);
+	mutex_lock(&cl_data->lock);
+	channel = cl_data->channels + (iminor(inode) & HSC_CH_MASK);
+
+	if (test_and_set_bit(HSC_CH_OPEN, &channel->flags)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	/*
+	 * Check if we have already claimed the port associated with the HSI
+	 * client. If not, try to claim it; otherwise increase its refcount.
+	 */
+	if (cl_data->usecnt == 0) {
+		ret = hsi_claim_port(cl_data->cl, 0);
+		if (ret < 0)
+			goto out;
+		hsi_setup(cl_data->cl);
+	}
+	cl_data->usecnt++;
+
+	ret = hsc_msgs_alloc(channel);
+	if (ret < 0) {
+		__hsc_port_release(cl_data);
+		goto out;
+	}
+
+	file->private_data = channel;
+	mutex_unlock(&cl_data->lock);
+
+	return ret;
+out:
+	mutex_unlock(&cl_data->lock);
+
+	return ret;
+}
+
+static int hsc_release(struct inode *inode __maybe_unused, struct file *file)
+{
+	struct hsc_channel *channel = file->private_data;
+	struct hsc_client_data *cl_data = channel->cl_data;
+
+	mutex_lock(&cl_data->lock);
+	file->private_data = NULL;
+	if (test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
+		hsi_stop_tx(channel->cl);
+	__hsc_port_release(cl_data);
+	hsc_reset_list(channel, &channel->rx_msgs_queue);
+	hsc_reset_list(channel, &channel->tx_msgs_queue);
+	hsc_reset_list(channel, &channel->free_msgs_list);
+	clear_bit(HSC_CH_READ, &channel->flags);
+	clear_bit(HSC_CH_WRITE, &channel->flags);
+	clear_bit(HSC_CH_OPEN, &channel->flags);
+	wake_up(&channel->rx_wait);
+	wake_up(&channel->tx_wait);
+	mutex_unlock(&cl_data->lock);
+
+	return 0;
+}
+
+static const struct file_operations hsc_fops = {
+	.owner		= THIS_MODULE,
+	.read		= hsc_read,
+	.write		= hsc_write,
+	.unlocked_ioctl	= hsc_ioctl,
+	.open		= hsc_open,
+	.release	= hsc_release,
+};
+
+static void __devinit hsc_channel_init(struct hsc_channel *channel)
+{
+	init_waitqueue_head(&channel->rx_wait);
+	init_waitqueue_head(&channel->tx_wait);
+	spin_lock_init(&channel->lock);
+	INIT_LIST_HEAD(&channel->free_msgs_list);
+	INIT_LIST_HEAD(&channel->rx_msgs_queue);
+	INIT_LIST_HEAD(&channel->tx_msgs_queue);
+}
+
+static int __devinit hsc_probe(struct device *dev)
+{
+	const char devname[] = "hsi_char";
+	struct hsc_client_data *cl_data;
+	struct hsc_channel *channel;
+	struct hsi_client *cl = to_hsi_client(dev);
+	unsigned int hsc_baseminor;
+	dev_t hsc_dev;
+	int ret;
+	int i;
+
+	cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+	if (!cl_data) {
+		dev_err(dev, "Could not allocate hsc_client_data\n");
+		return -ENOMEM;
+	}
+	hsc_baseminor = HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl));
+	if (!hsc_major) {
+		ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor,
+						HSC_DEVS, devname);
+		if (ret == 0)
+			hsc_major = MAJOR(hsc_dev);
+	} else {
+		hsc_dev = MKDEV(hsc_major, hsc_baseminor);
+		ret = register_chrdev_region(hsc_dev, HSC_DEVS, devname);
+	}
+	if (ret < 0) {
+		dev_err(dev, "Device %s allocation failed %d\n",
+					hsc_major ? "minor" : "major", ret);
+		goto out1;
+	}
+	mutex_init(&cl_data->lock);
+	hsi_client_set_drvdata(cl, cl_data);
+	cdev_init(&cl_data->cdev, &hsc_fops);
+	cl_data->cdev.owner = THIS_MODULE;
+	cl_data->cl = cl;
+	for (i = 0, channel = cl_data->channels; i < HSC_DEVS; i++, channel++) {
+		hsc_channel_init(channel);
+		channel->ch = i;
+		channel->cl = cl;
+		channel->cl_data = cl_data;
+	}
+
+	/* 1 hsi client -> N char devices (one for each channel) */
+	ret = cdev_add(&cl_data->cdev, hsc_dev, HSC_DEVS);
+	if (ret) {
+		dev_err(dev, "Could not add char device %d\n", ret);
+		goto out2;
+	}
+
+	return 0;
+out2:
+	unregister_chrdev_region(hsc_dev, HSC_DEVS);
+out1:
+	kfree(cl_data);
+
+	return ret;
+}
+
+static int __devexit hsc_remove(struct device *dev)
+{
+	struct hsi_client *cl = to_hsi_client(dev);
+	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
+	dev_t hsc_dev = cl_data->cdev.dev;
+
+	cdev_del(&cl_data->cdev);
+	unregister_chrdev_region(hsc_dev, HSC_DEVS);
+	hsi_client_set_drvdata(cl, NULL);
+	kfree(cl_data);
+
+	return 0;
+}
+
+static struct hsi_client_driver hsc_driver = {
+	.driver = {
+		.name	= "hsi_char",
+		.owner	= THIS_MODULE,
+		.probe	= hsc_probe,
+		.remove	= __devexit_p(hsc_remove),
+	},
+};
+
+static int __init hsc_init(void)
+{
+	int ret;
+
+	if ((max_data_size < 4) || (max_data_size > 0x10000) ||
+		(max_data_size & (max_data_size - 1))) {
+		pr_err("Invalid max read/write data size\n");
+		return -EINVAL;
+	}
+
+	ret = hsi_register_client_driver(&hsc_driver);
+	if (ret) {
+		pr_err("Error while registering HSI/SSI driver %d\n", ret);
+		return ret;
+	}
+
+	pr_info("HSI/SSI char device loaded\n");
+
+	return 0;
+}
+module_init(hsc_init);
+
+static void __exit hsc_exit(void)
+{
+	hsi_unregister_client_driver(&hsc_driver);
+	pr_info("HSI char device removed\n");
+}
+module_exit(hsc_exit);
+
+MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
+MODULE_ALIAS("hsi:hsi_char");
+MODULE_DESCRIPTION("HSI character device");
+MODULE_LICENSE("GPL v2");
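
A user-space sketch of how the hsi_char interface above could be exercised. The uapi header path and the /dev node name are assumptions (this patch only adds the cdev); the HSC_* ioctls, the hsc_tx_config fields and the 32-bit length alignment rule are taken from hsc_ioctl(), hsc_tx_get() and hsc_read() above.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hsi/hsi_char.h>		/* assumed location of the HSC_* ABI */

int main(void)
{
	struct hsc_tx_config txc;
	uint32_t buf[256];	/* hsc_read() rejects lengths not 32-bit aligned */
	ssize_t n;
	int fd;

	fd = open("/dev/hsi_char0", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		return 1;

	if (ioctl(fd, HSC_GET_TX, &txc) == 0)	/* filled in by hsc_tx_get() */
		printf("tx: mode=%u channels=%u speed=%u arb=%u\n",
		       (unsigned)txc.mode, (unsigned)txc.channels,
		       (unsigned)txc.speed, (unsigned)txc.arb_mode);

	n = read(fd, buf, sizeof(buf));		/* blocks until a frame or an error */
	if (n > 0)
		printf("read %zd bytes\n", n);

	close(fd);
	return 0;
}
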
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
new file mode 100644
index 0000000..4e2d79b
--- /dev/null
+++ b/drivers/hsi/hsi.c
@@ -0,0 +1,494 @@
+/*
+ * HSI core.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/hsi/hsi.h>
+#include <linux/compiler.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "hsi_core.h"
+
+static struct device_type hsi_ctrl = {
+	.name	= "hsi_controller",
+};
+
+static struct device_type hsi_cl = {
+	.name	= "hsi_client",
+};
+
+static struct device_type hsi_port = {
+	.name	= "hsi_port",
+};
+
+static ssize_t modalias_show(struct device *dev,
+			struct device_attribute *a __maybe_unused, char *buf)
+{
+	return sprintf(buf, "hsi:%s\n", dev_name(dev));
+}
+
+static struct device_attribute hsi_bus_dev_attrs[] = {
+	__ATTR_RO(modalias),
+	__ATTR_NULL,
+};
+
+static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	if (dev->type == &hsi_cl)
+		add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
+
+	return 0;
+}
+
+static int hsi_bus_match(struct device *dev, struct device_driver *driver)
+{
+	return strcmp(dev_name(dev), driver->name) == 0;
+}
+
+static struct bus_type hsi_bus_type = {
+	.name		= "hsi",
+	.dev_attrs	= hsi_bus_dev_attrs,
+	.match		= hsi_bus_match,
+	.uevent		= hsi_bus_uevent,
+};
+
+static void hsi_client_release(struct device *dev)
+{
+	kfree(to_hsi_client(dev));
+}
+
+static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
+{
+	struct hsi_client *cl;
+	unsigned long flags;
+
+	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+	if (!cl)
+		return;
+	cl->device.type = &hsi_cl;
+	cl->tx_cfg = info->tx_cfg;
+	cl->rx_cfg = info->rx_cfg;
+	cl->device.bus = &hsi_bus_type;
+	cl->device.parent = &port->device;
+	cl->device.release = hsi_client_release;
+	dev_set_name(&cl->device, info->name);
+	cl->device.platform_data = info->platform_data;
+	spin_lock_irqsave(&port->clock, flags);
+	list_add_tail(&cl->link, &port->clients);
+	spin_unlock_irqrestore(&port->clock, flags);
+	if (info->archdata)
+		cl->device.archdata = *info->archdata;
+	if (device_register(&cl->device) < 0) {
+		pr_err("hsi: failed to register client: %s\n", info->name);
+		kfree(cl);
+	}
+}
+
+static void hsi_scan_board_info(struct hsi_controller *hsi)
+{
+	struct hsi_cl_info *cl_info;
+	struct hsi_port	*p;
+
+	list_for_each_entry(cl_info, &hsi_board_list, list)
+		if (cl_info->info.hsi_id == hsi->id) {
+			p = hsi_find_port_num(hsi, cl_info->info.port);
+			if (!p)
+				continue;
+			hsi_new_client(p, &cl_info->info);
+		}
+}
+
+static int hsi_remove_client(struct device *dev, void *data __maybe_unused)
+{
+	struct hsi_client *cl = to_hsi_client(dev);
+	struct hsi_port *port = to_hsi_port(dev->parent);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->clock, flags);
+	list_del(&cl->link);
+	spin_unlock_irqrestore(&port->clock, flags);
+	device_unregister(dev);
+
+	return 0;
+}
+
+static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
+{
+	device_for_each_child(dev, NULL, hsi_remove_client);
+	device_unregister(dev);
+
+	return 0;
+}
+
+static void hsi_controller_release(struct device *dev __maybe_unused)
+{
+}
+
+static void hsi_port_release(struct device *dev __maybe_unused)
+{
+}
+
+/**
+ * hsi_unregister_controller - Unregister an HSI controller
+ * @hsi: The HSI controller to unregister
+ */
+void hsi_unregister_controller(struct hsi_controller *hsi)
+{
+	device_for_each_child(&hsi->device, NULL, hsi_remove_port);
+	device_unregister(&hsi->device);
+}
+EXPORT_SYMBOL_GPL(hsi_unregister_controller);
+
+/**
+ * hsi_register_controller - Register an HSI controller and its ports
+ * @hsi: The HSI controller to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_controller(struct hsi_controller *hsi)
+{
+	unsigned int i;
+	int err;
+
+	hsi->device.type = &hsi_ctrl;
+	hsi->device.bus = &hsi_bus_type;
+	hsi->device.release = hsi_controller_release;
+	err = device_register(&hsi->device);
+	if (err < 0)
+		return err;
+	for (i = 0; i < hsi->num_ports; i++) {
+		hsi->port[i].device.parent = &hsi->device;
+		hsi->port[i].device.bus = &hsi_bus_type;
+		hsi->port[i].device.release = hsi_port_release;
+		hsi->port[i].device.type = &hsi_port;
+		INIT_LIST_HEAD(&hsi->port[i].clients);
+		spin_lock_init(&hsi->port[i].clock);
+		err = device_register(&hsi->port[i].device);
+		if (err < 0)
+			goto out;
+	}
+	/* Populate HSI bus with HSI clients */
+	hsi_scan_board_info(hsi);
+
+	return 0;
+out:
+	hsi_unregister_controller(hsi);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(hsi_register_controller);
+
+/**
+ * hsi_register_client_driver - Register an HSI client to the HSI bus
+ * @drv: HSI client driver to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_client_driver(struct hsi_client_driver *drv)
+{
+	drv->driver.bus = &hsi_bus_type;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(hsi_register_client_driver);
+
+static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
+{
+	return 0;
+}
+
+static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
+{
+	return 0;
+}
+
+/**
+ * hsi_alloc_controller - Allocate an HSI controller and its ports
+ * @n_ports: Number of ports on the HSI controller
+ * @flags: Kernel allocation flags
+ *
+ * Return NULL on failure or a pointer to an hsi_controller on success.
+ */
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
+{
+	struct hsi_controller	*hsi;
+	struct hsi_port		*port;
+	unsigned int		i;
+
+	if (!n_ports)
+		return NULL;
+
+	port = kzalloc(sizeof(*port)*n_ports, flags);
+	if (!port)
+		return NULL;
+	hsi = kzalloc(sizeof(*hsi), flags);
+	if (!hsi)
+		goto out;
+	for (i = 0; i < n_ports; i++) {
+		dev_set_name(&port[i].device, "port%d", i);
+		port[i].num = i;
+		port[i].async = hsi_dummy_msg;
+		port[i].setup = hsi_dummy_cl;
+		port[i].flush = hsi_dummy_cl;
+		port[i].start_tx = hsi_dummy_cl;
+		port[i].stop_tx = hsi_dummy_cl;
+		port[i].release = hsi_dummy_cl;
+		mutex_init(&port[i].lock);
+	}
+	hsi->num_ports = n_ports;
+	hsi->port = port;
+
+	return hsi;
+out:
+	kfree(port);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_controller);
+
+/**
+ * hsi_free_controller - Free an HSI controller
+ * @hsi: Pointer to HSI controller
+ */
+void hsi_free_controller(struct hsi_controller *hsi)
+{
+	if (!hsi)
+		return;
+
+	kfree(hsi->port);
+	kfree(hsi);
+}
+EXPORT_SYMBOL_GPL(hsi_free_controller);
+
+/**
+ * hsi_free_msg - Free an HSI message
+ * @msg: Pointer to the HSI message
+ *
+ * The client is responsible for freeing the buffers pointed to by the scatterlists.
+ */
+void hsi_free_msg(struct hsi_msg *msg)
+{
+	if (!msg)
+		return;
+	sg_free_table(&msg->sgt);
+	kfree(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_free_msg);
+
+/**
+ * hsi_alloc_msg - Allocate an HSI message
+ * @nents: Number of memory entries
+ * @flags: Kernel allocation flags
+ *
+ * nents can be 0. This mainly makes sense for read transfers.
+ * In that case, HSI drivers will call the complete callback when
+ * there is data to be read without consuming it.
+ *
+ * Return NULL on failure or a pointer to an hsi_msg on success.
+ */
+struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags)
+{
+	struct hsi_msg *msg;
+	int err;
+
+	msg = kzalloc(sizeof(*msg), flags);
+	if (!msg)
+		return NULL;
+
+	if (!nents)
+		return msg;
+
+	err = sg_alloc_table(&msg->sgt, nents, flags);
+	if (unlikely(err)) {
+		kfree(msg);
+		msg = NULL;
+	}
+
+	return msg;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_msg);
+
+/**
+ * hsi_async - Submit an HSI transfer to the controller
+ * @cl: HSI client sending the transfer
+ * @msg: The HSI transfer passed to controller
+ *
+ * The HSI message must have the channel, ttype, complete and destructor
+ * fields set beforehand. If nents > 0, the client must also initialize the
+ * scatterlist to point to the buffers to write to or read from.
+ *
+ * HSI controllers rely on pre-allocated buffers from their clients and do
+ * not allocate buffers of their own.
+ *
+ * Once the HSI message transfer finishes, the HSI controller calls the
+ * complete callback with the status and actual_len fields of the HSI message
+ * updated. The complete callback can be called before returning from
+ * hsi_async.
+ *
+ * Returns -errno on failure or 0 on success
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg)
+{
+	struct hsi_port *port = hsi_get_port(cl);
+
+	if (!hsi_port_claimed(cl))
+		return -EACCES;
+
+	WARN_ON_ONCE(!msg->destructor || !msg->complete);
+	msg->cl = cl;
+
+	return port->async(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_async);
+
+/**
+ * hsi_claim_port - Claim the HSI client's port
+ * @cl: HSI client that wants to claim its port
+ * @share: Flag to indicate if the client wants to share the port or not.
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_claim_port(struct hsi_client *cl, unsigned int share)
+{
+	struct hsi_port *port = hsi_get_port(cl);
+	int err = 0;
+
+	mutex_lock(&port->lock);
+	if ((port->claimed) && (!port->shared || !share)) {
+		err = -EBUSY;
+		goto out;
+	}
+	if (!try_module_get(to_hsi_controller(port->device.parent)->owner)) {
+		err = -ENODEV;
+		goto out;
+	}
+	port->claimed++;
+	port->shared = !!share;
+	cl->pclaimed = 1;
+out:
+	mutex_unlock(&port->lock);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(hsi_claim_port);
+
+/**
+ * hsi_release_port - Release the HSI client's port
+ * @cl: HSI client which previously claimed its port
+ */
+void hsi_release_port(struct hsi_client *cl)
+{
+	struct hsi_port *port = hsi_get_port(cl);
+
+	mutex_lock(&port->lock);
+	/* Allow HW driver to do some cleanup */
+	port->release(cl);
+	if (cl->pclaimed)
+		port->claimed--;
+	BUG_ON(port->claimed < 0);
+	cl->pclaimed = 0;
+	if (!port->claimed)
+		port->shared = 0;
+	module_put(to_hsi_controller(port->device.parent)->owner);
+	mutex_unlock(&port->lock);
+}
+EXPORT_SYMBOL_GPL(hsi_release_port);
+
+static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused)
+{
+	if (cl->hsi_start_rx)
+		(*cl->hsi_start_rx)(cl);
+
+	return 0;
+}
+
+static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused)
+{
+	if (cl->hsi_stop_rx)
+		(*cl->hsi_stop_rx)(cl);
+
+	return 0;
+}
+
+static int hsi_port_for_each_client(struct hsi_port *port, void *data,
+				int (*fn)(struct hsi_client *cl, void *data))
+{
+	struct hsi_client *cl;
+
+	spin_lock(&port->clock);
+	list_for_each_entry(cl, &port->clients, link) {
+		spin_unlock(&port->clock);
+		(*fn)(cl, data);
+		spin_lock(&port->clock);
+	}
+	spin_unlock(&port->clock);
+
+	return 0;
+}
+
+/**
+ * hsi_event - Notifies clients about port events
+ * @port: Port where the event occurred
+ * @event: The event type
+ *
+ * Clients should not be concerned about wake line behavior. However, due
+ * to a race condition in the HSI hardware protocol, clients need to be notified
+ * about wake line changes, so they can implement a workaround for it.
+ *
+ * Events:
+ * HSI_EVENT_START_RX - Incoming wake line high
+ * HSI_EVENT_STOP_RX - Incoming wake line down
+ */
+void hsi_event(struct hsi_port *port, unsigned int event)
+{
+	int (*fn)(struct hsi_client *cl, void *data);
+
+	switch (event) {
+	case HSI_EVENT_START_RX:
+		fn = hsi_start_rx;
+		break;
+	case HSI_EVENT_STOP_RX:
+		fn = hsi_stop_rx;
+		break;
+	default:
+		return;
+	}
+	hsi_port_for_each_client(port, NULL, fn);
+}
+EXPORT_SYMBOL_GPL(hsi_event);
+
+static int __init hsi_init(void)
+{
+	return bus_register(&hsi_bus_type);
+}
+postcore_initcall(hsi_init);
+
+static void __exit hsi_exit(void)
+{
+	bus_unregister(&hsi_bus_type);
+}
+module_exit(hsi_exit);
+
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_DESCRIPTION("High-speed Synchronous Serial Interface (HSI) framework");
+MODULE_LICENSE("GPL v2");
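
On the in-kernel side, this is a sketch of how a client driver might queue a single write through the core registered above. hsi_claim_port(), hsi_alloc_msg(), hsi_async(), hsi_free_msg() and the channel/ttype/complete/destructor fields come from this file; HSI_MSG_WRITE, sg_init_one() and the example_* names are assumptions used only for illustration.

#include <linux/hsi/hsi.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Runs from the controller context once the transfer finishes or is flushed. */
static void example_tx_destructor(struct hsi_msg *msg)
{
	kfree(sg_virt(msg->sgt.sgl));	/* buffers are owned by the client */
	hsi_free_msg(msg);
}

/* Assumes the port was claimed earlier, e.g. hsi_claim_port(cl, 0) at probe. */
static int example_send(struct hsi_client *cl, const void *data, size_t len)
{
	struct hsi_msg *msg;
	void *buf;
	int err;

	msg = hsi_alloc_msg(1, GFP_KERNEL);	/* one scatterlist entry */
	if (!msg)
		return -ENOMEM;
	buf = kmemdup(data, len, GFP_KERNEL);
	if (!buf) {
		hsi_free_msg(msg);
		return -ENOMEM;
	}
	sg_init_one(msg->sgt.sgl, buf, len);
	msg->channel = 0;
	msg->ttype = HSI_MSG_WRITE;		/* assumed constant from hsi.h */
	msg->complete = example_tx_destructor;
	msg->destructor = example_tx_destructor;

	err = hsi_async(cl, msg);		/* completion may run before return */
	if (err < 0)
		example_tx_destructor(msg);
	return err;
}

The hsi_async_read()/hsi_async_write() calls used by hsi_char above appear to be convenience wrappers that fill in ttype before calling hsi_async().
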
diff --git a/drivers/hsi/hsi_boardinfo.c b/drivers/hsi/hsi_boardinfo.c
new file mode 100644
index 0000000..e56bc6d
--- /dev/null
+++ b/drivers/hsi/hsi_boardinfo.c
@@ -0,0 +1,62 @@
+/*
+ * HSI clients registration interface
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/hsi/hsi.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "hsi_core.h"
+
+/*
+ * hsi_board_list is only used internally by the HSI framework.
+ * No one else is allowed to make use of it.
+ */
+LIST_HEAD(hsi_board_list);
+EXPORT_SYMBOL_GPL(hsi_board_list);
+
+/**
+ * hsi_register_board_info - Register HSI clients information
+ * @info: Array of HSI clients on the board
+ * @len: Length of the array
+ *
+ * HSI clients are statically declared and registered in board files.
+ *
+ * HSI clients will be automatically registered to the HSI bus once the
+ * controller and the port where the client wishes to attach are registered
+ * to it.
+ *
+ * Return -errno on failure, 0 on success.
+ */
+int __init hsi_register_board_info(struct hsi_board_info const *info,
+							unsigned int len)
+{
+	struct hsi_cl_info *cl_info;
+
+	cl_info = kzalloc(sizeof(*cl_info) * len, GFP_KERNEL);
+	if (!cl_info)
+		return -ENOMEM;
+
+	for (; len; len--, info++, cl_info++) {
+		cl_info->info = *info;
+		list_add_tail(&cl_info->list, &hsi_board_list);
+	}
+
+	return 0;
+}
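
Finally, a sketch of the board-file side that hsi_scan_board_info() consumes: the name, hsi_id, port, tx_cfg and rx_cfg fields are the ones referenced above, while the example_* identifiers and the omitted configuration values are placeholders.

#include <linux/hsi/hsi.h>
#include <linux/kernel.h>
#include <linux/init.h>

static struct hsi_board_info example_hsi_clients[] __initdata = {
	{
		.name	= "hsi_char",	/* matched against the client driver name */
		.hsi_id	= 0,		/* controller index, see hsi_scan_board_info() */
		.port	= 0,		/* resolved through hsi_find_port_num() */
		/* .tx_cfg / .rx_cfg would carry mode, channels, speed, etc. */
	},
};

/* Board files would typically call this from their machine init code. */
static int __init example_board_hsi_init(void)
{
	return hsi_register_board_info(example_hsi_clients,
				       ARRAY_SIZE(example_hsi_clients));
}
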
diff --git a/drivers/hsi/hsi_core.h b/drivers/hsi/hsi_core.h
new file mode 100644
index 0000000..ab5c2fb
--- /dev/null
+++ b/drivers/hsi/hsi_core.h
@@ -0,0 +1,35 @@
+/*
+ * HSI framework internal interfaces
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_CORE_H__
+#define __LINUX_HSI_CORE_H__
+
+#include <linux/hsi/hsi.h>
+
+struct hsi_cl_info {
+	struct list_head	list;
+	struct hsi_board_info	info;
+};
+
+extern struct list_head hsi_board_list;
+
+#endif /* __LINUX_HSI_CORE_H__ */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5b32d56..8deedc1 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -253,7 +253,8 @@
 	  If you say yes here you get support for the temperature
 	  sensor(s) inside your CPU. Supported are later revisions of
 	  the AMD Family 10h and all revisions of the AMD Family 11h,
-	  12h (Llano), 14h (Brazos) and 15h (Bulldozer) microarchitectures.
+	  12h (Llano), 14h (Brazos) and 15h (Bulldozer/Trinity)
+	  microarchitectures.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called k10temp.
@@ -425,7 +426,7 @@
 
 config SENSORS_GPIO_FAN
 	tristate "GPIO fan"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  If you say yes here you get support for fans connected to GPIO lines.
 
@@ -883,7 +884,7 @@
 
 config SENSORS_SHT15
 	tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  If you say yes here you get support for the Sensiron SHT10, SHT11,
 	  SHT15, SHT71, SHT75 humidity and temperature sensors.
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 554f046..145f135 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -632,6 +632,7 @@
 		sensors->dev_attr.show = ro->show;
 		sensors->index = ro->index;
 
+		sysfs_attr_init(&sensors->dev_attr.attr);
 		res = device_create_file(dev, &sensors->dev_attr);
 		if (res) {
 			sensors->dev_attr.attr.name = NULL;
@@ -661,6 +662,7 @@
 		sensors->dev_attr.store = rw->set;
 		sensors->index = rw->index;
 
+		sysfs_attr_init(&sensors->dev_attr.attr);
 		res = device_create_file(dev, &sensors->dev_attr);
 		if (res) {
 			sensors->dev_attr.attr.name = NULL;
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 0e0cfcc..ce43642 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -128,6 +128,7 @@
 		ret = PTR_ERR(chip->hwmon_dev);
 		goto error_remove_group;
 	}
+	chip->spi_dev = spi_dev;
 
 	return 0;
 error_remove_group:
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index ff37363..44e1fd7 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -233,18 +233,15 @@
  * nearest match if no exact match where found.
  */
 static int
-get_fan_auto_nearest(struct adm1031_data *data,
-		     int chan, u8 val, u8 reg, u8 *new_reg)
+get_fan_auto_nearest(struct adm1031_data *data, int chan, u8 val, u8 reg)
 {
 	int i;
 	int first_match = -1, exact_match = -1;
 	u8 other_reg_val =
 	    (*data->chan_select_table)[FAN_CHAN_FROM_REG(reg)][chan ? 0 : 1];
 
-	if (val == 0) {
-		*new_reg = 0;
+	if (val == 0)
 		return 0;
-	}
 
 	for (i = 0; i < 8; i++) {
 		if ((val == (*data->chan_select_table)[i][chan]) &&
@@ -264,13 +261,11 @@
 	}
 
 	if (exact_match >= 0)
-		*new_reg = exact_match;
+		return exact_match;
 	else if (first_match >= 0)
-		*new_reg = first_match;
-	else
-		return -EINVAL;
+		return first_match;
 
-	return 0;
+	return -EINVAL;
 }
 
 static ssize_t show_fan_auto_channel(struct device *dev,
@@ -301,11 +296,12 @@
 
 	mutex_lock(&data->update_lock);
 
-	ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg);
-	if (ret) {
+	ret = get_fan_auto_nearest(data, nr, val, data->conf1);
+	if (ret < 0) {
 		mutex_unlock(&data->update_lock);
 		return ret;
 	}
+	reg = ret;
 	data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1);
 	if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) ^
 	    (old_fan_mode & ADM1031_CONF1_AUTO_MODE)) {
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 729499e..ece4159 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -276,6 +276,7 @@
 		return false;
 	default:
 		BUG();
+		return true;
 	}
 }
 
@@ -291,6 +292,7 @@
 		return true;
 	default:
 		BUG();
+		return false;
 	}
 }
 
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index aba29d6..307bb32 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -33,6 +33,9 @@
 module_param(force, bool, 0444);
 MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
 
+/* PCI-IDs for Northbridge devices not used anywhere else */
+#define PCI_DEVICE_ID_AMD_15H_M10H_NB_F3	0x1403
+
 /* CPUID function 0x80000001, ebx */
 #define CPUID_PKGTYPE_MASK	0xf0000000
 #define CPUID_PKGTYPE_F		0x00000000
@@ -210,6 +213,7 @@
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_NB_F3) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 193067e..de8f7ad 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -596,8 +596,10 @@
 	return 0;
 }
 
-static int max6639_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int max6639_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
 	if (data < 0)
 		return data;
@@ -606,8 +608,9 @@
 			MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
 }
 
-static int max6639_resume(struct i2c_client *client)
+static int max6639_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
 	if (data < 0)
 		return data;
@@ -615,6 +618,7 @@
 	return i2c_smbus_write_byte_data(client,
 			MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct i2c_device_id max6639_id[] = {
 	{"max6639", 0},
@@ -623,15 +627,18 @@
 
 MODULE_DEVICE_TABLE(i2c, max6639_id);
 
+static const struct dev_pm_ops max6639_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(max6639_suspend, max6639_resume)
+};
+
 static struct i2c_driver max6639_driver = {
 	.class = I2C_CLASS_HWMON,
 	.driver = {
 		   .name = "max6639",
+		   .pm = &max6639_pm_ops,
 		   },
 	.probe = max6639_probe,
 	.remove = max6639_remove,
-	.suspend = max6639_suspend,
-	.resume = max6639_resume,
 	.id_table = max6639_id,
 	.detect = max6639_detect,
 	.address_list = normal_i2c,
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index a25350c..54922ed 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -2619,15 +2619,15 @@
 static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
 				 struct w83627ehf_sio_data *sio_data)
 {
-	static const char __initdata sio_name_W83627EHF[] = "W83627EHF";
-	static const char __initdata sio_name_W83627EHG[] = "W83627EHG";
-	static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
-	static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
-	static const char __initdata sio_name_W83627UHG[] = "W83627UHG";
-	static const char __initdata sio_name_W83667HG[] = "W83667HG";
-	static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B";
-	static const char __initdata sio_name_NCT6775[] = "NCT6775F";
-	static const char __initdata sio_name_NCT6776[] = "NCT6776F";
+	static const char sio_name_W83627EHF[] __initconst = "W83627EHF";
+	static const char sio_name_W83627EHG[] __initconst = "W83627EHG";
+	static const char sio_name_W83627DHG[] __initconst = "W83627DHG";
+	static const char sio_name_W83627DHG_P[] __initconst = "W83627DHG-P";
+	static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
+	static const char sio_name_W83667HG[] __initconst = "W83667HG";
+	static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
+	static const char sio_name_NCT6775[] __initconst = "NCT6775F";
+	static const char sio_name_NCT6776[] __initconst = "NCT6776F";
 
 	u16 val;
 	const char *sio_name;
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 37f4211..00e8f21 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -182,7 +182,6 @@
 	pci_restore_state(pdev);
 
 	i2c_dw_init(i2c);
-	i2c_dw_enable(i2c);
 	return 0;
 }
 
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index c976285..fa080eb 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -516,12 +516,6 @@
 
 MODULE_DEVICE_TABLE(pci, pci_tbl);
 
-int stats_open_generic(struct inode *inode, struct file *fp)
-{
-	fp->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
 				loff_t *off)
 {
@@ -534,7 +528,7 @@
 }
 
 static const struct file_operations idle_fops = {
-	.open	= stats_open_generic,
+	.open	= simple_open,
 	.read	= stats_read_ul,
 	.llseek = default_llseek,
 };
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 83b720e..246fdc1 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -179,7 +179,7 @@
 {
 	struct ib_port_attr attr;
 	char *speed = "";
-	int rate = -1;		/* in deci-Gb/sec */
+	int rate;		/* in deci-Gb/sec */
 	ssize_t ret;
 
 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
@@ -187,9 +187,6 @@
 		return ret;
 
 	switch (attr.active_speed) {
-	case IB_SPEED_SDR:
-		rate = 25;
-		break;
 	case IB_SPEED_DDR:
 		speed = " DDR";
 		rate = 50;
@@ -210,6 +207,10 @@
 		speed = " EDR";
 		rate = 250;
 		break;
+	case IB_SPEED_SDR:
+	default:		/* default to SDR for invalid rates */
+		rate = 25;
+		break;
 	}
 
 	rate *= ib_width_enum_to_int(attr.active_width);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75d3056..669673e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,6 +253,11 @@
 		if (out_mad->data[15] & 0x1)
 			props->active_speed = IB_SPEED_FDR10;
 	}
+
+	/* Avoid wrong speed value returned by FW if the IB link is down. */
+	if (props->state == IB_PORT_DOWN)
+		 props->active_speed = IB_SPEED_SDR;
+
 out:
 	kfree(in_mad);
 	kfree(out_mad);
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 34aebb8..3c843cd 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -95,7 +95,8 @@
 	input_dev = input_allocate_device();
 	if (!onkey || !input_dev) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto err_free_mem;
 	}
 
 	onkey->input = input_dev;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index d2c0db1..4790110 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -486,7 +486,6 @@
 	unsigned char *packet = psmouse->packet;
 
 	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
-	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
 	input_mt_report_pointer_emulation(dev, true);
 	input_sync(dev);
 }
@@ -967,6 +966,7 @@
 	if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
 		return -1;
 
+	__set_bit(INPUT_PROP_POINTER, dev->propbit);
 	__set_bit(EV_KEY, dev->evbit);
 	__set_bit(EV_ABS, dev->evbit);
 	__clear_bit(EV_REL, dev->evbit);
@@ -1017,7 +1017,9 @@
 			 */
 			psmouse_warn(psmouse, "couldn't query resolution data.\n");
 		}
-
+		/* v4 is clickpad, with only one button. */
+		__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+		__clear_bit(BTN_RIGHT, dev->keybit);
 		__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
 		/* For X to recognize me as touchpad. */
 		input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
@@ -1245,6 +1247,8 @@
  */
 static int elantech_reconnect(struct psmouse *psmouse)
 {
+	psmouse_reset(psmouse);
+
 	if (elantech_detect(psmouse, 0))
 		return -1;
 
@@ -1324,6 +1328,8 @@
 	if (!etd)
 		return -ENOMEM;
 
+	psmouse_reset(psmouse);
+
 	etd->parity[0] = 1;
 	for (i = 1; i < 256; i++)
 		etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index a9ad8e1..39fe9b7 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -12,9 +12,9 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/input-polldev.h>
+#include <linux/gpio.h>
 #include <linux/gpio_mouse.h>
 
-#include <asm/gpio.h>
 
 /*
  * Timer function which is run every scan_ms ms when the device is opened.
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index a977bfa..661a0ca 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -741,6 +741,14 @@
 			}
 		} else {
 			/* SFAC packet */
+			if ((packet[0] & (FSP_PB0_LBTN|FSP_PB0_PHY_BTN)) ==
+				FSP_PB0_LBTN) {
+				/* On-pad click in SFAC mode should be handled
+				 * by userspace.  On-pad clicks in MFMC mode
+				 * are real clickpad clicks, and not ignored.
+				 */
+				packet[0] &= ~FSP_PB0_LBTN;
+			}
 
 			/* no multi-finger information */
 			ad->last_mt_fgr = 0;
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 22b2180..f310249 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -304,7 +304,7 @@
 		return 0;
 
 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
-		printk(KERN_WARNING "trackpoint.c: failed to get extended button data\n");
+		psmouse_warn(psmouse, "failed to get extended button data\n");
 		button_info = 0;
 	}
 
@@ -326,16 +326,18 @@
 
 	error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
 	if (error) {
-		printk(KERN_ERR
-			"trackpoint.c: failed to create sysfs attributes, error: %d\n",
-			error);
+		psmouse_err(psmouse,
+			    "failed to create sysfs attributes, error: %d\n",
+			    error);
 		kfree(psmouse->private);
 		psmouse->private = NULL;
 		return -1;
 	}
 
-	printk(KERN_INFO "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
-		firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f);
+	psmouse_info(psmouse,
+		     "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+		     firmware_id,
+		     (button_info & 0xf0) >> 4, button_info & 0x0f);
 
 	return 0;
 }
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 6c6f6d8..f7eda3d0 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -1,6 +1,4 @@
 /*
- * drivers/input/touchscreen/tps6507x_ts.c
- *
  * Touchscreen driver for the tps6507x chip.
  *
  * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
@@ -376,4 +374,4 @@
 MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
 MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps6507x-tsc");
+MODULE_ALIAS("platform:tps6507x-ts");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index ae2ec92..a5bee8e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2707,7 +2707,8 @@
  * The exported alloc_coherent function for dma_ops.
  */
 static void *alloc_coherent(struct device *dev, size_t size,
-			    dma_addr_t *dma_addr, gfp_t flag)
+			    dma_addr_t *dma_addr, gfp_t flag,
+			    struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	void *virt_addr;
@@ -2765,7 +2766,8 @@
  * The exported free_coherent function for dma_ops.
  */
 static void free_coherent(struct device *dev, size_t size,
-			  void *virt_addr, dma_addr_t dma_addr)
+			  void *virt_addr, dma_addr_t dma_addr,
+			  struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct protection_domain *domain;
@@ -2846,8 +2848,8 @@
 }
 
 static struct dma_map_ops amd_iommu_dma_ops = {
-	.alloc_coherent = alloc_coherent,
-	.free_coherent = free_coherent,
+	.alloc = alloc_coherent,
+	.free = free_coherent,
 	.map_page = map_page,
 	.unmap_page = unmap_page,
 	.map_sg = map_sg,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 132f93b..f93d5ac 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2949,7 +2949,8 @@
 }
 
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags)
+				  dma_addr_t *dma_handle, gfp_t flags,
+				  struct dma_attrs *attrs)
 {
 	void *vaddr;
 	int order;
@@ -2981,7 +2982,7 @@
 }
 
 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-				dma_addr_t dma_handle)
+				dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
 
@@ -3126,8 +3127,8 @@
 }
 
 struct dma_map_ops intel_dma_ops = {
-	.alloc_coherent = intel_alloc_coherent,
-	.free_coherent = intel_free_coherent,
+	.alloc = intel_alloc_coherent,
+	.free = intel_free_coherent,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
 	.map_page = intel_map_page,
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 103dbd9..f55fc5d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -323,15 +323,9 @@
 	return count;
 }
 
-static int debug_open_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 #define DEBUG_FOPS(name)						\
 	static const struct file_operations debug_##name##_fops = {	\
-		.open = debug_open_generic,				\
+		.open = simple_open,					\
 		.read = debug_read_##name,				\
 		.write = debug_write_##name,				\
 		.llseek = generic_file_llseek,				\
@@ -339,7 +333,7 @@
 
 #define DEBUG_FOPS_RO(name)						\
 	static const struct file_operations debug_##name##_fops = {	\
-		.open = debug_open_generic,				\
+		.open = simple_open,					\
 		.read = debug_read_##name,				\
 		.llseek = generic_file_llseek,				\
 	};
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 05ed4d0c..c0b8c96 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -891,7 +891,7 @@
 {
 	struct bchannel		*bch;
 
-	if (rq->adr.channel > 2)
+	if (rq->adr.channel == 0 || rq->adr.channel > 2)
 		return -EINVAL;
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index d055ae7..e2c83a2 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1962,7 +1962,7 @@
 {
 	struct bchannel		*bch;
 
-	if (rq->adr.channel > 2)
+	if (rq->adr.channel == 0 || rq->adr.channel > 2)
 		return -EINVAL;
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6023387..8cde2a0 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -486,7 +486,7 @@
 {
 	struct bchannel		*bch;
 
-	if (rq->adr.channel > 2)
+	if (rq->adr.channel == 0 || rq->adr.channel > 2)
 		return -EINVAL;
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index b47e9be..884369f 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1506,7 +1506,7 @@
 {
 	struct bchannel		*bch;
 
-	if (rq->adr.channel > 2)
+	if (rq->adr.channel == 0 || rq->adr.channel > 2)
 		return -EINVAL;
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 10446ab..9a6da6e 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1670,7 +1670,7 @@
 {
 	struct bchannel		*bch;
 
-	if (rq->adr.channel > 2)
+	if (rq->adr.channel == 0 || rq->adr.channel > 2)
 		return -EINVAL;
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index dd6de9f..c726e09 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -860,7 +860,7 @@
 {
 	struct bchannel *bch;
 
-	if (rq->adr.channel > 2)
+	if (rq->adr.channel == 0 || rq->adr.channel > 2)
 		return -EINVAL;
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 7f1e7ba..2183357 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1015,7 +1015,7 @@
 {
 	struct bchannel *bch;
 
-	if (rq->adr.channel > 2)
+	if (rq->adr.channel == 0 || rq->adr.channel > 2)
 		return -EINVAL;
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3d0dfa7..97e73e5 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -539,9 +539,6 @@
 	bitmap->events_cleared = bitmap->mddev->events;
 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
 
-	bitmap->flags |= BITMAP_HOSTENDIAN;
-	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
-
 	kunmap_atomic(sb);
 
 	return 0;
@@ -1788,7 +1785,9 @@
 		 * re-add of a missing device */
 		start = mddev->recovery_cp;
 
+	mutex_lock(&mddev->bitmap_info.mutex);
 	err = bitmap_init_from_disk(bitmap, start);
+	mutex_unlock(&mddev->bitmap_info.mutex);
 
 	if (err)
 		goto out;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b0fcc7d..fa211d8 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -198,6 +198,7 @@
 static int linear_run (struct mddev *mddev)
 {
 	struct linear_conf *conf;
+	int ret;
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
@@ -211,7 +212,13 @@
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
 	mddev->queue->backing_dev_info.congested_fn = linear_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
-	return md_integrity_register(mddev);
+
+	ret =  md_integrity_register(mddev);
+	if (ret) {
+		kfree(conf);
+		mddev->private = NULL;
+	}
+	return ret;
 }
 
 static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6f31f55..de63a1f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -407,6 +407,8 @@
 	return array_sectors;
 }
 
+static int raid0_stop(struct mddev *mddev);
+
 static int raid0_run(struct mddev *mddev)
 {
 	struct r0conf *conf;
@@ -454,7 +456,12 @@
 
 	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
 	dump_zones(mddev);
-	return md_integrity_register(mddev);
+
+	ret = md_integrity_register(mddev);
+	if (ret)
+		raid0_stop(mddev);
+
+	return ret;
 }
 
 static int raid0_stop(struct mddev *mddev)
@@ -625,6 +632,7 @@
 static void *raid0_takeover_raid1(struct mddev *mddev)
 {
 	struct r0conf *priv_conf;
+	int chunksect;
 
 	/* Check layout:
 	 *  - (N - 1) mirror drives must be already faulty
@@ -635,10 +643,25 @@
 		return ERR_PTR(-EINVAL);
 	}
 
+	/*
+	 * a raid1 doesn't have the notion of chunk size, so
+	 * figure out the largest suitable size we can use.
+	 */
+	chunksect = 64 * 2; /* 64K by default */
+
+	/* The array must be an exact multiple of chunksize */
+	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
+		chunksect >>= 1;
+
+	if ((chunksect << 9) < PAGE_SIZE)
+		/* array size does not allow a suitable chunk size */
+		return ERR_PTR(-EINVAL);
+
 	/* Set new parameters */
 	mddev->new_level = 0;
 	mddev->new_layout = 0;
-	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+	mddev->new_chunk_sectors = chunksect;
+	mddev->chunk_sectors = chunksect;
 	mddev->delta_disks = 1 - mddev->raid_disks;
 	mddev->raid_disks = 1;
 	/* make sure it will be not marked as dirty */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a40a20..15dd59b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1712,6 +1712,7 @@
 	struct r1conf *conf = mddev->private;
 	int primary;
 	int i;
+	int vcnt;
 
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -1721,9 +1722,9 @@
 			break;
 		}
 	r1_bio->read_disk = primary;
+	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
-		int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
 		int size;
@@ -1738,7 +1739,7 @@
 				s = sbio->bi_io_vec[j].bv_page;
 				if (memcmp(page_address(p),
 					   page_address(s),
-					   PAGE_SIZE))
+					   sbio->bi_io_vec[j].bv_len))
 					break;
 			}
 		} else
@@ -2386,8 +2387,7 @@
 		int ok = 1;
 		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
 			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
-				struct md_rdev *rdev =
-					rcu_dereference(conf->mirrors[i].rdev);
+				struct md_rdev *rdev = conf->mirrors[i].rdev;
 				ok = rdev_set_badblocks(rdev, sector_nr,
 							min_bad, 0
 					) && ok;
@@ -2636,11 +2636,13 @@
 	return ERR_PTR(err);
 }
 
+static int stop(struct mddev *mddev);
 static int run(struct mddev *mddev)
 {
 	struct r1conf *conf;
 	int i;
 	struct md_rdev *rdev;
+	int ret;
 
 	if (mddev->level != 1) {
 		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -2705,7 +2707,11 @@
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
 	}
-	return md_integrity_register(mddev);
+
+	ret =  md_integrity_register(mddev);
+	if (ret)
+		stop(mddev);
+	return ret;
 }
 
 static int stop(struct mddev *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3540316..c8dbb84 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1788,6 +1788,7 @@
 	struct r10conf *conf = mddev->private;
 	int i, first;
 	struct bio *tbio, *fbio;
+	int vcnt;
 
 	atomic_set(&r10_bio->remaining, 1);
 
@@ -1802,10 +1803,10 @@
 	first = i;
 	fbio = r10_bio->devs[i].bio;
 
+	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
 	/* now find blocks with errors */
 	for (i=0 ; i < conf->copies ; i++) {
 		int  j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].bio;
 
@@ -1821,7 +1822,7 @@
 			for (j = 0; j < vcnt; j++)
 				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
 					   page_address(tbio->bi_io_vec[j].bv_page),
-					   PAGE_SIZE))
+					   fbio->bi_io_vec[j].bv_len))
 					break;
 			if (j == vcnt)
 				continue;
@@ -1871,7 +1872,6 @@
 	 */
 	for (i = 0; i < conf->copies; i++) {
 		int j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].repl_bio;
 		if (!tbio || !tbio->bi_end_io)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 23ac880..f351422 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2471,39 +2471,41 @@
 	int abort = 0;
 	int i;
 
-	md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
 	clear_bit(STRIPE_SYNCING, &sh->state);
 	s->syncing = 0;
 	s->replacing = 0;
 	/* There is nothing more to do for sync/check/repair.
+	 * Don't even need to abort as that is handled elsewhere
+	 * if needed, and not always wanted e.g. if there is a known
+	 * bad block here.
 	 * For recover/replace we need to record a bad block on all
 	 * non-sync devices, or abort the recovery
 	 */
-	if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
-		return;
-	/* During recovery devices cannot be removed, so locking and
-	 * refcounting of rdevs is not needed
-	 */
-	for (i = 0; i < conf->raid_disks; i++) {
-		struct md_rdev *rdev = conf->disks[i].rdev;
-		if (rdev
-		    && !test_bit(Faulty, &rdev->flags)
-		    && !test_bit(In_sync, &rdev->flags)
-		    && !rdev_set_badblocks(rdev, sh->sector,
-					   STRIPE_SECTORS, 0))
-			abort = 1;
-		rdev = conf->disks[i].replacement;
-		if (rdev
-		    && !test_bit(Faulty, &rdev->flags)
-		    && !test_bit(In_sync, &rdev->flags)
-		    && !rdev_set_badblocks(rdev, sh->sector,
-					   STRIPE_SECTORS, 0))
-			abort = 1;
+	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
+		/* During recovery devices cannot be removed, so
+		 * locking and refcounting of rdevs is not needed
+		 */
+		for (i = 0; i < conf->raid_disks; i++) {
+			struct md_rdev *rdev = conf->disks[i].rdev;
+			if (rdev
+			    && !test_bit(Faulty, &rdev->flags)
+			    && !test_bit(In_sync, &rdev->flags)
+			    && !rdev_set_badblocks(rdev, sh->sector,
+						   STRIPE_SECTORS, 0))
+				abort = 1;
+			rdev = conf->disks[i].replacement;
+			if (rdev
+			    && !test_bit(Faulty, &rdev->flags)
+			    && !test_bit(In_sync, &rdev->flags)
+			    && !rdev_set_badblocks(rdev, sh->sector,
+						   STRIPE_SECTORS, 0))
+				abort = 1;
+		}
+		if (abort)
+			conf->recovery_disabled =
+				conf->mddev->recovery_disabled;
 	}
-	if (abort) {
-		conf->recovery_disabled = conf->mddev->recovery_disabled;
-		set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
-	}
+	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
 }
 
 static int want_replace(struct stripe_head *sh, int disk_idx)
@@ -3203,7 +3205,8 @@
 			/* Not in-sync */;
 		else if (is_bad) {
 			/* also not in-sync */
-			if (!test_bit(WriteErrorSeen, &rdev->flags)) {
+			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
+			    test_bit(R5_UPTODATE, &dev->flags)) {
 				/* treat as in-sync, but with a read error
 				 * which we can now try to correct
 				 */
@@ -3276,12 +3279,14 @@
 		/* If there is a failed device being replaced,
 		 *     we must be recovering.
 		 * else if we are after recovery_cp, we must be syncing
+		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
 		 * else we can only be replacing
 		 * sync and recovery both need to read all devices, and so
 		 * use the same flag.
 		 */
 		if (do_recovery ||
-		    sh->sector >= conf->mddev->recovery_cp)
+		    sh->sector >= conf->mddev->recovery_cp ||
+		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
 			s->syncing = 1;
 		else
 			s->replacing = 1;
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 4555baa..39696c6 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -143,10 +143,12 @@
 static void dvb_frontend_wakeup(struct dvb_frontend *fe);
 static int dtv_get_frontend(struct dvb_frontend *fe,
 			    struct dvb_frontend_parameters *p_out);
+static int dtv_property_legacy_params_sync(struct dvb_frontend *fe,
+					   struct dvb_frontend_parameters *p);
 
 static bool has_get_frontend(struct dvb_frontend *fe)
 {
-	return fe->ops.get_frontend;
+	return fe->ops.get_frontend != NULL;
 }
 
 /*
@@ -697,6 +699,7 @@
 					fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
 					fepriv->delay = HZ / 2;
 				}
+				dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
 				fe->ops.read_status(fe, &s);
 				if (s != fepriv->status) {
 					dvb_frontend_add_event(fe, s); /* update event list */
@@ -1833,6 +1836,13 @@
 		return -EINVAL;
 
 	/*
+	 * Initialize output parameters to match the values given by
+	 * the user. FE_SET_FRONTEND triggers an initial frontend event
+	 * with status = 0, which copies output parameters to userspace.
+	 */
+	dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
+
+	/*
 	 * Be sure that the bandwidth will be filled for all
 	 * non-satellite systems, as tuners need to know what
 	 * low pass/Nyquist half filter should be applied, in
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index 3b7b102..482d249 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -238,12 +238,27 @@
 
 static u32 it913x_query(struct usb_device *udev, u8 pro)
 {
-	int ret;
+	int ret, i;
 	u8 data[4];
-	ret = it913x_io(udev, READ_LONG, pro, CMD_DEMOD_READ,
-		0x1222, 0, &data[0], 3);
+	u8 ver;
 
-	it913x_config.chip_ver = data[0];
+	for (i = 0; i < 5; i++) {
+		ret = it913x_io(udev, READ_LONG, pro, CMD_DEMOD_READ,
+			0x1222, 0, &data[0], 3);
+		ver = data[0];
+		if (ver > 0 && ver < 3)
+			break;
+		msleep(100);
+	}
+
+	if (ver < 1 || ver > 2) {
+		info("Failed to identify chip version, applying 1");
+		it913x_config.chip_ver = 0x1;
+		it913x_config.chip_type = 0x9135;
+		return 0;
+	}
+
+	it913x_config.chip_ver = ver;
 	it913x_config.chip_type = (u16)(data[2] << 8) + data[1];
 
 	info("Chip Version=%02x Chip Type=%04x", it913x_config.chip_ver,
@@ -660,30 +675,41 @@
 			if ((packet_size > min_pkt) || (i == fw->size)) {
 				fw_data = (u8 *)(fw->data + pos);
 				pos += packet_size;
-				if (packet_size > 0)
-					ret |= it913x_io(udev, WRITE_DATA,
+				if (packet_size > 0) {
+					ret = it913x_io(udev, WRITE_DATA,
 						DEV_0, CMD_SCATTER_WRITE, 0,
 						0, fw_data, packet_size);
+					if (ret < 0)
+						break;
+				}
 				udelay(1000);
 			}
 		}
 		i++;
 	}
 
-	ret |= it913x_io(udev, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
-
-	msleep(100);
-
 	if (ret < 0)
-		info("FRM Firmware Download Failed (%04x)" , ret);
+		info("FRM Firmware Download Failed (%d)" , ret);
 	else
 		info("FRM Firmware Download Completed - Resetting Device");
 
-	ret |= it913x_return_status(udev);
+	msleep(30);
+
+	ret = it913x_io(udev, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
+	if (ret < 0)
+		info("FRM Device not responding to reboot");
+
+	ret = it913x_return_status(udev);
+	if (ret == 0) {
+		info("FRM Failed to reboot device");
+		return -ENODEV;
+	}
 
 	msleep(30);
 
-	ret |= it913x_wr_reg(udev, DEV_0,  I2C_CLK, I2C_CLK_400);
+	ret = it913x_wr_reg(udev, DEV_0,  I2C_CLK, I2C_CLK_400);
+
+	msleep(30);
 
 	/* Tuner function */
 	if (it913x_config.dual_mode)
@@ -901,5 +927,5 @@
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("it913x USB 2 Driver");
-MODULE_VERSION("1.27");
+MODULE_VERSION("1.28");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 5452bee..989e556 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1763,13 +1763,13 @@
 		IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
 		if (iarg > AUDIO_STEREO_SWAPPED)
 			return -EINVAL;
-		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg);
+		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg + 1);
 
 	case AUDIO_BILINGUAL_CHANNEL_SELECT:
 		IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
 		if (iarg > AUDIO_STEREO_SWAPPED)
 			return -EINVAL;
-		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg);
+		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg + 1);
 
 	default:
 		return -EINVAL;
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 4a44f9a..b76b0ac 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -468,22 +468,30 @@
 	spin_unlock_irqrestore(&stream->clock.lock, flags);
 }
 
+static void uvc_video_clock_reset(struct uvc_streaming *stream)
+{
+	struct uvc_clock *clock = &stream->clock;
+
+	clock->head = 0;
+	clock->count = 0;
+	clock->last_sof = -1;
+	clock->sof_offset = -1;
+}
+
 static int uvc_video_clock_init(struct uvc_streaming *stream)
 {
 	struct uvc_clock *clock = &stream->clock;
 
 	spin_lock_init(&clock->lock);
-	clock->head = 0;
-	clock->count = 0;
 	clock->size = 32;
-	clock->last_sof = -1;
-	clock->sof_offset = -1;
 
 	clock->samples = kmalloc(clock->size * sizeof(*clock->samples),
 				 GFP_KERNEL);
 	if (clock->samples == NULL)
 		return -ENOMEM;
 
+	uvc_video_clock_reset(stream);
+
 	return 0;
 }
 
@@ -1424,8 +1432,6 @@
 
 	if (free_buffers)
 		uvc_free_urb_buffers(stream);
-
-	uvc_video_clock_cleanup(stream);
 }
 
 /*
@@ -1555,10 +1561,6 @@
 
 	uvc_video_stats_start(stream);
 
-	ret = uvc_video_clock_init(stream);
-	if (ret < 0)
-		return ret;
-
 	if (intf->num_altsetting > 1) {
 		struct usb_host_endpoint *best_ep = NULL;
 		unsigned int best_psize = 3 * 1024;
@@ -1683,6 +1685,8 @@
 
 	stream->frozen = 0;
 
+	uvc_video_clock_reset(stream);
+
 	ret = uvc_commit_video(stream, &stream->ctrl);
 	if (ret < 0) {
 		uvc_queue_enable(&stream->queue, 0);
@@ -1819,25 +1823,35 @@
 		uvc_uninit_video(stream, 1);
 		usb_set_interface(stream->dev->udev, stream->intfnum, 0);
 		uvc_queue_enable(&stream->queue, 0);
+		uvc_video_clock_cleanup(stream);
 		return 0;
 	}
 
-	ret = uvc_queue_enable(&stream->queue, 1);
+	ret = uvc_video_clock_init(stream);
 	if (ret < 0)
 		return ret;
 
+	ret = uvc_queue_enable(&stream->queue, 1);
+	if (ret < 0)
+		goto error_queue;
+
 	/* Commit the streaming parameters. */
 	ret = uvc_commit_video(stream, &stream->ctrl);
-	if (ret < 0) {
-		uvc_queue_enable(&stream->queue, 0);
-		return ret;
-	}
+	if (ret < 0)
+		goto error_commit;
 
 	ret = uvc_init_video(stream, GFP_KERNEL);
-	if (ret < 0) {
-		usb_set_interface(stream->dev->udev, stream->intfnum, 0);
-		uvc_queue_enable(&stream->queue, 0);
-	}
+	if (ret < 0)
+		goto error_video;
+
+	return 0;
+
+error_video:
+	usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+error_commit:
+	uvc_queue_enable(&stream->queue, 0);
+error_queue:
+	uvc_video_clock_cleanup(stream);
 
 	return ret;
 }
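
The uvc_video.c rework above moves clock setup into the streaming start path and unwinds partial initialization through goto labels in reverse order, so each failure point releases exactly what was already acquired. A minimal standalone sketch of that unwind pattern, with placeholder steps standing in for clock init, queue enable and video init:

#include <stdio.h>

static int  step_clock(void) { return 0; }
static int  step_queue(void) { return 0; }
static int  step_video(void) { return -1; }	/* pretend the last step fails */
static void undo_queue(void) { puts("queue disabled"); }
static void undo_clock(void) { puts("clock cleaned up"); }

static int stream_start(void)
{
	int ret;

	ret = step_clock();
	if (ret < 0)
		return ret;

	ret = step_queue();
	if (ret < 0)
		goto error_clock;

	ret = step_video();
	if (ret < 0)
		goto error_queue;

	return 0;

error_queue:
	undo_queue();
error_clock:
	undo_clock();
	return ret;
}

int main(void)
{
	return stream_start() ? 1 : 0;
}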
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 3aa36eb..44a3fdb 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -262,13 +262,6 @@
 	return count;
 }
 
-static int aat2870_reg_open_file(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static ssize_t aat2870_reg_read_file(struct file *file, char __user *user_buf,
 				     size_t count, loff_t *ppos)
 {
@@ -330,7 +323,7 @@
 }
 
 static const struct file_operations aat2870_reg_fops = {
-	.open = aat2870_reg_open_file,
+	.open = simple_open,
 	.read = aat2870_reg_read_file,
 	.write = aat2870_reg_write_file,
 };
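
This hunk, together with the ab3100 and ibmasmfs hunks that follow, drops identical hand-rolled debugfs open handlers in favour of the generic simple_open() helper from fs/libfs.c, which performs the same private-data hookup. For reference, the removed handlers were all equivalent to this kernel-context snippet:

#include <linux/fs.h>

/* Equivalent of the removed per-driver open handlers; simple_open()
 * provides this i_private -> private_data copy generically. */
static int example_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}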
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 60107ee..1efad20 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -483,12 +483,6 @@
 	bool mode;
 };
 
-static int ab3100_get_set_reg_open_file(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t ab3100_get_set_reg(struct file *file,
 				  const char __user *user_buf,
 				  size_t count, loff_t *ppos)
@@ -583,7 +577,7 @@
 }
 
 static const struct file_operations ab3100_get_set_reg_fops = {
-	.open = ab3100_get_set_reg_open_file,
+	.open = simple_open,
 	.write = ab3100_get_set_reg,
 	.llseek = noop_llseek,
 };
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 1c034b8..6673e57 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -500,12 +500,6 @@
 	return 1;
 }
 
-static int remote_settings_file_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static int remote_settings_file_close(struct inode *inode, struct file *file)
 {
 	return 0;
@@ -600,7 +594,7 @@
 };
 
 static const struct file_operations remote_settings_fops = {
-	.open =		remote_settings_file_open,
+	.open =		simple_open,
 	.release =	remote_settings_file_close,
 	.read =		remote_settings_file_read,
 	.write =	remote_settings_file_write,
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3f7ad83..3aa9a96 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -134,12 +134,17 @@
 static int hwbreaks_ok;
 static int hw_break_val;
 static int hw_break_val2;
+static int cont_instead_of_sstep;
+static unsigned long cont_thread_id;
+static unsigned long sstep_thread_id;
 #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
 static int arch_needs_sstep_emulation = 1;
 #else
 static int arch_needs_sstep_emulation;
 #endif
+static unsigned long cont_addr;
 static unsigned long sstep_addr;
+static int restart_from_top_after_write;
 static int sstep_state;
 
 /* Storage for the registers, in GDB format. */
@@ -187,7 +192,8 @@
 	 */
 	while (!final_ack)
 		msleep_interruptible(1500);
-
+	/* Pause for any other threads to exit after final ack. */
+	msleep_interruptible(1000);
 	if (configured)
 		kgdb_unregister_io_module(&kgdbts_io_ops);
 	configured = 0;
@@ -211,7 +217,7 @@
 	if (!strcmp(arg, "kgdbts_break_test"))
 		addr = (unsigned long)kgdbts_break_test;
 	else if (!strcmp(arg, "sys_open"))
-		addr = (unsigned long)sys_open;
+		addr = (unsigned long)do_sys_open;
 	else if (!strcmp(arg, "do_fork"))
 		addr = (unsigned long)do_fork;
 	else if (!strcmp(arg, "hw_break_val"))
@@ -283,6 +289,16 @@
 	hw_break_val++;
 }
 
+static int get_thread_id_continue(char *put_str, char *arg)
+{
+	char *ptr = &put_str[11];
+
+	if (put_str[1] != 'T' || put_str[2] != '0')
+		return 1;
+	kgdb_hex2long(&ptr, &cont_thread_id);
+	return 0;
+}
+
 static int check_and_rewind_pc(char *put_str, char *arg)
 {
 	unsigned long addr = lookup_addr(arg);
@@ -299,13 +315,21 @@
 	if (addr + BREAK_INSTR_SIZE == ip)
 		offset = -BREAK_INSTR_SIZE;
 #endif
-	if (strcmp(arg, "silent") && ip + offset != addr) {
+
+	if (arch_needs_sstep_emulation && sstep_addr &&
+	    ip + offset == sstep_addr &&
+	    ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) {
+		/* This is a special case for emulated single step */
+		v2printk("Emul: rewind hit single step bp\n");
+		restart_from_top_after_write = 1;
+	} else if (strcmp(arg, "silent") && ip + offset != addr) {
 		eprintk("kgdbts: BP mismatch %lx expected %lx\n",
 			   ip + offset, addr);
 		return 1;
 	}
 	/* Readjust the instruction pointer if needed */
 	ip += offset;
+	cont_addr = ip;
 #ifdef GDB_ADJUSTS_BREAK_OFFSET
 	instruction_pointer_set(&kgdbts_regs, ip);
 #endif
@@ -315,6 +339,8 @@
 static int check_single_step(char *put_str, char *arg)
 {
 	unsigned long addr = lookup_addr(arg);
+	static int matched_id;
+
 	/*
 	 * From an arch independent point of view the instruction pointer
 	 * should be on a different instruction
@@ -324,6 +350,29 @@
 	gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
 	v2printk("Singlestep stopped at IP: %lx\n",
 		   instruction_pointer(&kgdbts_regs));
+
+	if (sstep_thread_id != cont_thread_id) {
+		/*
+		 * Ensure we stopped in the same thread id as before, else the
+		 * debugger should continue until the original thread that was
+		 * single stepped is scheduled again, emulating gdb's behavior.
+		 */
+		v2printk("ThrID does not match: %lx\n", cont_thread_id);
+		if (arch_needs_sstep_emulation) {
+			if (matched_id &&
+			    instruction_pointer(&kgdbts_regs) != addr)
+				goto continue_test;
+			matched_id++;
+			ts.idx -= 2;
+			sstep_state = 0;
+			return 0;
+		}
+		cont_instead_of_sstep = 1;
+		ts.idx -= 4;
+		return 0;
+	}
+continue_test:
+	matched_id = 0;
 	if (instruction_pointer(&kgdbts_regs) == addr) {
 		eprintk("kgdbts: SingleStep failed at %lx\n",
 			   instruction_pointer(&kgdbts_regs));
@@ -365,10 +414,40 @@
 	return 1;
 }
 
+static void get_cont_catch(char *arg)
+{
+	/* Always send detach because the test is completed at this point */
+	fill_get_buf("D");
+}
+
+static int put_cont_catch(char *put_str, char *arg)
+{
+	/* This is at the end of the test and we catch any and all input */
+	v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id);
+	ts.idx--;
+	return 0;
+}
+
+static int emul_reset(char *put_str, char *arg)
+{
+	if (strncmp(put_str, "$OK", 3))
+		return 1;
+	if (restart_from_top_after_write) {
+		restart_from_top_after_write = 0;
+		ts.idx = -1;
+	}
+	return 0;
+}
+
 static void emul_sstep_get(char *arg)
 {
 	if (!arch_needs_sstep_emulation) {
-		fill_get_buf(arg);
+		if (cont_instead_of_sstep) {
+			cont_instead_of_sstep = 0;
+			fill_get_buf("c");
+		} else {
+			fill_get_buf(arg);
+		}
 		return;
 	}
 	switch (sstep_state) {
@@ -398,9 +477,11 @@
 static int emul_sstep_put(char *put_str, char *arg)
 {
 	if (!arch_needs_sstep_emulation) {
-		if (!strncmp(put_str+1, arg, 2))
-			return 0;
-		return 1;
+		char *ptr = &put_str[11];
+		if (put_str[1] != 'T' || put_str[2] != '0')
+			return 1;
+		kgdb_hex2long(&ptr, &sstep_thread_id);
+		return 0;
 	}
 	switch (sstep_state) {
 	case 1:
@@ -411,8 +492,7 @@
 		v2printk("Stopped at IP: %lx\n",
 			 instruction_pointer(&kgdbts_regs));
 		/* Want to stop at IP + break instruction size by default */
-		sstep_addr = instruction_pointer(&kgdbts_regs) +
-			BREAK_INSTR_SIZE;
+		sstep_addr = cont_addr + BREAK_INSTR_SIZE;
 		break;
 	case 2:
 		if (strncmp(put_str, "$OK", 3)) {
@@ -424,6 +504,9 @@
 		if (strncmp(put_str, "$T0", 3)) {
 			eprintk("kgdbts: failed continue sstep\n");
 			return 1;
+		} else {
+			char *ptr = &put_str[11];
+			kgdb_hex2long(&ptr, &sstep_thread_id);
 		}
 		break;
 	case 4:
@@ -502,10 +585,10 @@
 static struct test_struct singlestep_break_test[] = {
 	{ "?", "S0*" }, /* Clear break points */
 	{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
-	{ "c", "T0*", }, /* Continue */
+	{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+	{ "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
 	{ "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
 	{ "write", "OK", write_regs }, /* Write registers */
-	{ "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
 	{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
 	{ "g", "kgdbts_break_test", NULL, check_single_step },
 	{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
@@ -523,16 +606,16 @@
 static struct test_struct do_fork_test[] = {
 	{ "?", "S0*" }, /* Clear break points */
 	{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
-	{ "c", "T0*", }, /* Continue */
-	{ "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
-	{ "write", "OK", write_regs }, /* Write registers */
+	{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
 	{ "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
+	{ "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
+	{ "write", "OK", write_regs, emul_reset }, /* Write registers */
 	{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
 	{ "g", "do_fork", NULL, check_single_step },
 	{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
 	{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
 	{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
-	{ "", "" },
+	{ "", "", get_cont_catch, put_cont_catch },
 };
 
 /* Test for hitting a breakpoint at sys_open for whatever the number
@@ -541,16 +624,16 @@
 static struct test_struct sys_open_test[] = {
 	{ "?", "S0*" }, /* Clear break points */
 	{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
-	{ "c", "T0*", }, /* Continue */
-	{ "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
-	{ "write", "OK", write_regs }, /* Write registers */
+	{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
 	{ "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
+	{ "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
+	{ "write", "OK", write_regs, emul_reset }, /* Write registers */
 	{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
 	{ "g", "sys_open", NULL, check_single_step },
 	{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
 	{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
 	{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
-	{ "", "" },
+	{ "", "", get_cont_catch, put_cont_catch },
 };
 
 /*
@@ -693,8 +776,8 @@
 	/* This callback is a put char which is when kgdb sends data to
 	 * this I/O module.
 	 */
-	if (ts.tst[ts.idx].get[0] == '\0' &&
-		ts.tst[ts.idx].put[0] == '\0') {
+	if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' &&
+	    !ts.tst[ts.idx].get_handler) {
 		eprintk("kgdbts: ERROR: beyond end of test on"
 			   " '%s' line %i\n", ts.name, ts.idx);
 		return 0;
@@ -907,6 +990,17 @@
 	if (ptr)
 		sstep_test = simple_strtol(ptr+1, NULL, 10);
 
+	/* All HW break point tests */
+	if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
+		hwbreaks_ok = 1;
+		v1printk("kgdbts:RUN hw breakpoint test\n");
+		run_breakpoint_test(1);
+		v1printk("kgdbts:RUN hw write breakpoint test\n");
+		run_hw_break_test(1);
+		v1printk("kgdbts:RUN access write breakpoint test\n");
+		run_hw_break_test(0);
+	}
+
 	/* required internal KGDB tests */
 	v1printk("kgdbts:RUN plant and detach test\n");
 	run_plant_and_detach_test(0);
@@ -924,35 +1018,11 @@
 
 	/* ===Optional tests=== */
 
-	/* All HW break point tests */
-	if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
-		hwbreaks_ok = 1;
-		v1printk("kgdbts:RUN hw breakpoint test\n");
-		run_breakpoint_test(1);
-		v1printk("kgdbts:RUN hw write breakpoint test\n");
-		run_hw_break_test(1);
-		v1printk("kgdbts:RUN access write breakpoint test\n");
-		run_hw_break_test(0);
-	}
-
 	if (nmi_sleep) {
 		v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
 		run_nmi_sleep_test(nmi_sleep);
 	}
 
-#ifdef CONFIG_DEBUG_RODATA
-	/* Until there is an api to write to read-only text segments, use
-	 * HW breakpoints for the remainder of any tests, else print a
-	 * failure message if hw breakpoints do not work.
-	 */
-	if (!(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT && hwbreaks_ok)) {
-		eprintk("kgdbts: HW breakpoints do not work,"
-			"skipping remaining tests\n");
-		return;
-	}
-	force_hwbrks = 1;
-#endif /* CONFIG_DEBUG_RODATA */
-
 	/* If the do_fork test is run it will be the last test that is
 	 * executed because a kernel thread will be spawned at the very
 	 * end to unregister the debug hooks.
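
The kgdbts changes above reorder and extend table-driven test sequences, where each entry pairs a packet to send with an expected reply prefix plus optional per-step handlers (get_handler/put_handler). A small standalone sketch of how such a table can drive a check loop (the step contents and reply strings here are made up for illustration, not the real kgdbts packets):

#include <stdio.h>
#include <string.h>

struct test_step {
	const char *get;			/* packet the test sends      */
	const char *put;			/* expected reply prefix      */
	int (*put_handler)(const char *reply);	/* optional extra validation  */
};

static int grab_thread_id(const char *reply)
{
	/* kgdbts parses the thread id out of a $T0.. reply at this point */
	return reply[0] == 'T' ? 0 : 1;
}

static const struct test_step steps[] = {
	{ "?", "S0" },
	{ "c", "T0", grab_thread_id },
	{ "", "" },				/* empty entry ends the table */
};

static int run(const char *replies[])
{
	int i;

	for (i = 0; steps[i].get[0] || steps[i].put[0]; i++) {
		if (strncmp(replies[i], steps[i].put, strlen(steps[i].put)))
			return 1;
		if (steps[i].put_handler && steps[i].put_handler(replies[i]))
			return 1;
	}
	return 0;
}

int main(void)
{
	const char *replies[] = { "S05", "T05thread:01;" };
	return run(replies);
}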
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index eed213a..b1809650 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1623,24 +1623,6 @@
 	return ret;
 }
 
-static int
-mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
-{
-	int err;
-
-	mmc_claim_host(card->host);
-	err = mmc_set_blocklen(card, 512);
-	mmc_release_host(card->host);
-
-	if (err) {
-		pr_err("%s: unable to set block size to 512: %d\n",
-			md->disk->disk_name, err);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static void mmc_blk_remove_req(struct mmc_blk_data *md)
 {
 	struct mmc_card *card;
@@ -1768,7 +1750,6 @@
 static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
-	int err;
 	char cap_str[10];
 
 	/*
@@ -1781,10 +1762,6 @@
 	if (IS_ERR(md))
 		return PTR_ERR(md);
 
-	err = mmc_blk_set_blksize(md, card);
-	if (err)
-		goto out;
-
 	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
 	pr_info("%s: %s %s %s %s\n",
@@ -1809,7 +1786,7 @@
  out:
 	mmc_blk_remove_parts(card, md);
 	mmc_blk_remove_req(md);
-	return err;
+	return 0;
 }
 
 static void mmc_blk_remove(struct mmc_card *card)
@@ -1845,8 +1822,6 @@
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
-		mmc_blk_set_blksize(md, card);
-
 		/*
 		 * Resume involves the card going into idle state,
 		 * so current partition is always the main one.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 5d011a3..3f60606 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -267,6 +267,15 @@
 {
 	int ret;
 	const char *type;
+	const char *uhs_bus_speed_mode = "";
+	static const char *const uhs_speeds[] = {
+		[UHS_SDR12_BUS_SPEED] = "SDR12 ",
+		[UHS_SDR25_BUS_SPEED] = "SDR25 ",
+		[UHS_SDR50_BUS_SPEED] = "SDR50 ",
+		[UHS_SDR104_BUS_SPEED] = "SDR104 ",
+		[UHS_DDR50_BUS_SPEED] = "DDR50 ",
+	};
+
 
 	dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
 
@@ -296,6 +305,10 @@
 		break;
 	}
 
+	if (mmc_sd_card_uhs(card) &&
+		(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
+		uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
+
 	if (mmc_host_is_spi(card->host)) {
 		pr_info("%s: new %s%s%s card on SPI\n",
 			mmc_hostname(card->host),
@@ -303,13 +316,13 @@
 			mmc_card_ddr_mode(card) ? "DDR " : "",
 			type);
 	} else {
-		pr_info("%s: new %s%s%s%s card at address %04x\n",
+		pr_info("%s: new %s%s%s%s%s card at address %04x\n",
 			mmc_hostname(card->host),
 			mmc_card_uhs(card) ? "ultra high speed " :
 			(mmc_card_highspeed(card) ? "high speed " : ""),
 			(mmc_card_hs200(card) ? "HS200 " : ""),
 			mmc_card_ddr_mode(card) ? "DDR " : "",
-			type, card->rca);
+			uhs_bus_speed_mode, type, card->rca);
 	}
 
 #ifdef CONFIG_DEBUG_FS
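
The mmc/core/bus.c hunk introduces a lookup table indexed by the card's UHS bus-speed value, using C99 designated initializers so each string stays tied to its enum slot, plus a bounds check before indexing. A compact standalone illustration of the same idiom (the enum values here are placeholders, not the real UHS_*_BUS_SPEED constants):

#include <stdio.h>

enum { SPEED_SDR12, SPEED_SDR25, SPEED_SDR50, NR_SPEEDS };

int main(void)
{
	/* Designated initializers keep each string aligned with its index. */
	static const char *const names[] = {
		[SPEED_SDR12] = "SDR12 ",
		[SPEED_SDR25] = "SDR25 ",
		[SPEED_SDR50] = "SDR50 ",
	};
	unsigned int mode = SPEED_SDR25;
	const char *label = "";

	/* Check the index against the array size, as the driver does with
	 * ARRAY_SIZE(), so an unexpected speed value cannot overrun. */
	if (mode < sizeof(names) / sizeof(names[0]))
		label = names[mode];

	printf("bus speed: %s\n", label);
	return 0;
}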
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 14f262e..7474c47 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -527,10 +527,14 @@
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
-			 * The limit is really 250 ms, but that is
-			 * insufficient for some crappy cards.
+			 * The MMC spec says "It is strongly recommended
+			 * for hosts to implement more than 500ms
+			 * timeout value even if the card indicates
+			 * the 250ms maximum busy length."  Even the
+			 * previous value of 300ms is known to be
+			 * insufficient for some cards.
 			 */
-			limit_us = 300000;
+			limit_us = 3000000;
 		else
 			limit_us = 100000;
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 02914d6..54df5ad 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -695,6 +695,11 @@
 		else if (host->ios.clock <= 200000000)
 			index = EXT_CSD_PWR_CL_200_195;
 		break;
+	case MMC_VDD_27_28:
+	case MMC_VDD_28_29:
+	case MMC_VDD_29_30:
+	case MMC_VDD_30_31:
+	case MMC_VDD_31_32:
 	case MMC_VDD_32_33:
 	case MMC_VDD_33_34:
 	case MMC_VDD_34_35:
@@ -1111,11 +1116,10 @@
 		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
 				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
 		err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
-		if (err) {
-			pr_err("%s: power class selection to bus width %d failed\n",
-				mmc_hostname(card->host), 1 << bus_width);
-			goto err;
-		}
+		if (err)
+			pr_warning("%s: power class selection to bus width %d"
+				   " failed\n", mmc_hostname(card->host),
+				   1 << bus_width);
 	}
 
 	/*
@@ -1147,10 +1151,10 @@
 			err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
 						    ext_csd);
 			if (err)
-				pr_err("%s: power class selection to "
-				       "bus width %d failed\n",
-				       mmc_hostname(card->host),
-				       1 << bus_width);
+				pr_warning("%s: power class selection to "
+					   "bus width %d failed\n",
+					   mmc_hostname(card->host),
+					   1 << bus_width);
 
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 EXT_CSD_BUS_WIDTH,
@@ -1178,10 +1182,10 @@
 			err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
 						    ext_csd);
 			if (err)
-				pr_err("%s: power class selection to "
-				       "bus width %d ddr %d failed\n",
-				       mmc_hostname(card->host),
-				       1 << bus_width, ddr);
+				pr_warning("%s: power class selection to "
+					   "bus width %d ddr %d failed\n",
+					   mmc_hostname(card->host),
+					   1 << bus_width, ddr);
 
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 EXT_CSD_BUS_WIDTH,
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 40989e6..236842e 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -192,9 +192,15 @@
 	return ret;
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
+
+static int pm_no_operation(struct device *dev)
+{
+	return 0;
+}
 
 static const struct dev_pm_ops sdio_bus_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation)
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
@@ -204,11 +210,11 @@
 
 #define SDIO_PM_OPS_PTR	(&sdio_bus_pm_ops)
 
-#else /* !CONFIG_PM_RUNTIME */
+#else /* !CONFIG_PM */
 
 #define SDIO_PM_OPS_PTR	NULL
 
-#endif /* !CONFIG_PM_RUNTIME */
+#endif /* !CONFIG_PM */
 
 static struct bus_type sdio_bus_type = {
 	.name		= "sdio",
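
The sdio_bus.c change widens the dev_pm_ops from runtime-PM-only to all of CONFIG_PM and wires a no-op callback into the system-sleep slots so the structure is always well formed. A hedged kernel-context sketch of that shape (the example_* names are illustrative):

#include <linux/pm.h>

static int example_pm_nop(struct device *dev)
{
	return 0;	/* nothing to do, but the slot must exist */
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_pm_nop, example_pm_nop)
	/* SET_RUNTIME_PM_OPS(...) would follow, as in the hunk above */
};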
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 000b3ad..787aba1 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -31,6 +31,7 @@
 # define ATMCI_MR_PDCFBYTE		(  1 << 13)	/* Force Byte Transfer */
 # define ATMCI_MR_PDCPADV		(  1 << 14)	/* Padding Value */
 # define ATMCI_MR_PDCMODE		(  1 << 15)	/* PDC-oriented Mode */
+# define ATMCI_MR_CLKODD(x)		((x) << 16)	/* LSB of Clock Divider */
 #define ATMCI_DTOR			0x0008	/* Data Timeout */
 # define ATMCI_DTOCYC(x)		((x) <<  0)	/* Data Timeout Cycles */
 # define ATMCI_DTOMUL(x)		((x) <<  4)	/* Data Timeout Multiplier */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9819dc0..e94476b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -77,6 +77,7 @@
 	bool    has_cstor_reg;
 	bool    has_highspeed;
 	bool    has_rwproof;
+	bool	has_odd_clk_div;
 };
 
 struct atmel_mci_dma {
@@ -482,7 +483,14 @@
 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
 					unsigned int ns)
 {
-	return (ns * (host->bus_hz / 1000000) + 999) / 1000;
+	/*
+	 * It is easier to use us instead of ns here for the timeout,
+	 * as it prevents overflows during the calculation.
+	 */
+	unsigned int us = DIV_ROUND_UP(ns, 1000);
+
+	/* Maximum clock frequency is host->bus_hz/2 */
+	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
 }
 
 static void atmci_set_timeout(struct atmel_mci *host,
@@ -1127,16 +1135,27 @@
 		}
 
 		/* Calculate clock divider */
-		clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
-		if (clkdiv > 255) {
-			dev_warn(&mmc->class_dev,
-				"clock %u too slow; using %lu\n",
-				clock_min, host->bus_hz / (2 * 256));
-			clkdiv = 255;
+		if (host->caps.has_odd_clk_div) {
+			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
+			if (clkdiv > 511) {
+				dev_warn(&mmc->class_dev,
+				         "clock %u too slow; using %lu\n",
+				         clock_min, host->bus_hz / (511 + 2));
+				clkdiv = 511;
+			}
+			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
+			                 | ATMCI_MR_CLKODD(clkdiv & 1);
+		} else {
+			clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
+			if (clkdiv > 255) {
+				dev_warn(&mmc->class_dev,
+				         "clock %u too slow; using %lu\n",
+				         clock_min, host->bus_hz / (2 * 256));
+				clkdiv = 255;
+			}
+			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
 		}
 
-		host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
-
 		/*
 		 * WRPROOF and RDPROOF prevent overruns/underruns by
 		 * stopping the clock when the FIFO is full/empty.
@@ -2007,35 +2026,35 @@
 			"version: 0x%x\n", version);
 
 	host->caps.has_dma = 0;
-	host->caps.has_pdc = 0;
+	host->caps.has_pdc = 1;
 	host->caps.has_cfg_reg = 0;
 	host->caps.has_cstor_reg = 0;
 	host->caps.has_highspeed = 0;
 	host->caps.has_rwproof = 0;
+	host->caps.has_odd_clk_div = 0;
 
 	/* keep only major version number */
 	switch (version & 0xf00) {
-	case 0x100:
-	case 0x200:
-		host->caps.has_pdc = 1;
-		host->caps.has_rwproof = 1;
-		break;
-	case 0x300:
-	case 0x400:
 	case 0x500:
+		host->caps.has_odd_clk_div = 1;
+	case 0x400:
+	case 0x300:
 #ifdef CONFIG_AT_HDMAC
 		host->caps.has_dma = 1;
 #else
-		host->caps.has_dma = 0;
 		dev_info(&host->pdev->dev,
 			"has dma capability but dma engine is not selected, then use pio\n");
 #endif
+		host->caps.has_pdc = 0;
 		host->caps.has_cfg_reg = 1;
 		host->caps.has_cstor_reg = 1;
 		host->caps.has_highspeed = 1;
+	case 0x200:
 		host->caps.has_rwproof = 1;
+	case 0x100:
 		break;
 	default:
+		host->caps.has_pdc = 0;
 		dev_warn(&host->pdev->dev,
 				"Unmanaged mci version, set minimum capabilities\n");
 		break;
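
The atmel-mci hunks add support for controller revisions whose mode register takes an extra CLKODD bit, allowing odd divider values: the produced clock becomes bus_hz / (clkdiv + 2), with the divider's LSB placed in ATMCI_MR_CLKODD and the rest in ATMCI_MR_CLKDIV. A small standalone arithmetic sketch comparing the two formulas (the example clock figures are arbitrary):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long bus_hz = 100000000;	/* example master clock */
	unsigned int clock_min = 22000000;	/* requested card clock  */
	unsigned int clkdiv;

	/* Newer IP (has_odd_clk_div): f = bus_hz / (clkdiv + 2), so the
	 * divider can be odd; the LSB goes into MR_CLKODD, the rest into
	 * MR_CLKDIV, as in the hunk above. */
	clkdiv = DIV_ROUND_UP(bus_hz, clock_min) - 2;
	printf("odd-capable: clkdiv=%u -> CLKDIV=%u CLKODD=%u -> %lu Hz\n",
	       clkdiv, clkdiv >> 1, clkdiv & 1, bus_hz / (clkdiv + 2));

	/* Older IP: f = bus_hz / (2 * (clkdiv + 1)), even dividers only. */
	clkdiv = DIV_ROUND_UP(bus_hz, 2 * clock_min) - 1;
	printf("even-only:   clkdiv=%u -> %lu Hz\n",
	       clkdiv, bus_hz / (2 * (clkdiv + 1)));

	return 0;
}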
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 65f36cf..b0f2ef9 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -38,10 +38,10 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/module.h>
+#include <linux/fsl/mxs-dma.h>
 
 #include <mach/mxs.h>
 #include <mach/common.h>
-#include <mach/dma.h>
 #include <mach/mmc.h>
 
 #define DRIVER_NAME	"mxs-mmc"
@@ -305,7 +305,7 @@
 }
 
 static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
-	struct mxs_mmc_host *host, unsigned int append)
+	struct mxs_mmc_host *host, unsigned long flags)
 {
 	struct dma_async_tx_descriptor *desc;
 	struct mmc_data *data = host->data;
@@ -325,7 +325,7 @@
 	}
 
 	desc = dmaengine_prep_slave_sg(host->dmach,
-				sgl, sg_len, host->slave_dirn, append);
+				sgl, sg_len, host->slave_dirn, flags);
 	if (desc) {
 		desc->callback = mxs_mmc_dma_irq_callback;
 		desc->callback_param = host;
@@ -358,7 +358,7 @@
 	host->ssp_pio_words[2] = cmd1;
 	host->dma_dir = DMA_NONE;
 	host->slave_dirn = DMA_TRANS_NONE;
-	desc = mxs_mmc_prep_dma(host, 0);
+	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
 	if (!desc)
 		goto out;
 
@@ -398,7 +398,7 @@
 	host->ssp_pio_words[2] = cmd1;
 	host->dma_dir = DMA_NONE;
 	host->slave_dirn = DMA_TRANS_NONE;
-	desc = mxs_mmc_prep_dma(host, 0);
+	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
 	if (!desc)
 		goto out;
 
@@ -526,7 +526,7 @@
 	host->data = data;
 	host->dma_dir = dma_data_dir;
 	host->slave_dirn = slave_dirn;
-	desc = mxs_mmc_prep_dma(host, 1);
+	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc)
 		goto out;
 
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 47adb16..5c2b1c1 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1785,7 +1785,7 @@
 }
 #endif
 
-static int __init omap_hsmmc_probe(struct platform_device *pdev)
+static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 {
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
 	struct mmc_host *mmc;
@@ -1818,8 +1818,6 @@
 	if (res == NULL || irq < 0)
 		return -ENXIO;
 
-	res->start += pdata->reg_offset;
-	res->end += pdata->reg_offset;
 	res = request_mem_region(res->start, resource_size(res), pdev->name);
 	if (res == NULL)
 		return -EBUSY;
@@ -1843,7 +1841,7 @@
 	host->dma_ch	= -1;
 	host->irq	= irq;
 	host->slot_id	= 0;
-	host->mapbase	= res->start;
+	host->mapbase	= res->start + pdata->reg_offset;
 	host->base	= ioremap(host->mapbase, SZ_4K);
 	host->power_mode = MMC_POWER_OFF;
 	host->next_data.cookie = 1;
@@ -1875,8 +1873,6 @@
 		goto err1;
 	}
 
-	omap_hsmmc_context_save(host);
-
 	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
 		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
 		mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
@@ -1887,6 +1883,8 @@
 	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
 	pm_runtime_use_autosuspend(host->dev);
 
+	omap_hsmmc_context_save(host);
+
 	if (cpu_is_omap2430()) {
 		host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
 		/*
@@ -2018,8 +2016,7 @@
 err_irq_cd_init:
 	free_irq(host->irq, host);
 err_irq:
-	pm_runtime_mark_last_busy(host->dev);
-	pm_runtime_put_autosuspend(host->dev);
+	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);
 	if (host->got_dbclk) {
@@ -2037,35 +2034,33 @@
 	return ret;
 }
 
-static int omap_hsmmc_remove(struct platform_device *pdev)
+static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
 {
 	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
 	struct resource *res;
 
-	if (host) {
-		pm_runtime_get_sync(host->dev);
-		mmc_remove_host(host->mmc);
-		if (host->use_reg)
-			omap_hsmmc_reg_put(host);
-		if (host->pdata->cleanup)
-			host->pdata->cleanup(&pdev->dev);
-		free_irq(host->irq, host);
-		if (mmc_slot(host).card_detect_irq)
-			free_irq(mmc_slot(host).card_detect_irq, host);
+	pm_runtime_get_sync(host->dev);
+	mmc_remove_host(host->mmc);
+	if (host->use_reg)
+		omap_hsmmc_reg_put(host);
+	if (host->pdata->cleanup)
+		host->pdata->cleanup(&pdev->dev);
+	free_irq(host->irq, host);
+	if (mmc_slot(host).card_detect_irq)
+		free_irq(mmc_slot(host).card_detect_irq, host);
 
-		pm_runtime_put_sync(host->dev);
-		pm_runtime_disable(host->dev);
-		clk_put(host->fclk);
-		if (host->got_dbclk) {
-			clk_disable(host->dbclk);
-			clk_put(host->dbclk);
-		}
-
-		mmc_free_host(host->mmc);
-		iounmap(host->base);
-		omap_hsmmc_gpio_free(pdev->dev.platform_data);
+	pm_runtime_put_sync(host->dev);
+	pm_runtime_disable(host->dev);
+	clk_put(host->fclk);
+	if (host->got_dbclk) {
+		clk_disable(host->dbclk);
+		clk_put(host->dbclk);
 	}
 
+	mmc_free_host(host->mmc);
+	iounmap(host->base);
+	omap_hsmmc_gpio_free(pdev->dev.platform_data);
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res)
 		release_mem_region(res->start, resource_size(res));
@@ -2078,49 +2073,45 @@
 static int omap_hsmmc_suspend(struct device *dev)
 {
 	int ret = 0;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+	if (!host)
+		return 0;
 
 	if (host && host->suspended)
 		return 0;
 
-	if (host) {
-		pm_runtime_get_sync(host->dev);
-		host->suspended = 1;
-		if (host->pdata->suspend) {
-			ret = host->pdata->suspend(&pdev->dev,
-							host->slot_id);
-			if (ret) {
-				dev_dbg(mmc_dev(host->mmc),
-					"Unable to handle MMC board"
-					" level suspend\n");
-				host->suspended = 0;
-				return ret;
-			}
-		}
-		ret = mmc_suspend_host(host->mmc);
-
+	pm_runtime_get_sync(host->dev);
+	host->suspended = 1;
+	if (host->pdata->suspend) {
+		ret = host->pdata->suspend(dev, host->slot_id);
 		if (ret) {
+			dev_dbg(dev, "Unable to handle MMC board"
+					" level suspend\n");
 			host->suspended = 0;
-			if (host->pdata->resume) {
-				ret = host->pdata->resume(&pdev->dev,
-							  host->slot_id);
-				if (ret)
-					dev_dbg(mmc_dev(host->mmc),
-						"Unmask interrupt failed\n");
-			}
-			goto err;
+			return ret;
 		}
-
-		if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
-			omap_hsmmc_disable_irq(host);
-			OMAP_HSMMC_WRITE(host->base, HCTL,
-				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
-		}
-		if (host->got_dbclk)
-			clk_disable(host->dbclk);
-
 	}
+	ret = mmc_suspend_host(host->mmc);
+
+	if (ret) {
+		host->suspended = 0;
+		if (host->pdata->resume) {
+			ret = host->pdata->resume(dev, host->slot_id);
+			if (ret)
+				dev_dbg(dev, "Unmask interrupt failed\n");
+		}
+		goto err;
+	}
+
+	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
+		omap_hsmmc_disable_irq(host);
+		OMAP_HSMMC_WRITE(host->base, HCTL,
+				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+	}
+
+	if (host->got_dbclk)
+		clk_disable(host->dbclk);
 err:
 	pm_runtime_put_sync(host->dev);
 	return ret;
@@ -2130,39 +2121,38 @@
 static int omap_hsmmc_resume(struct device *dev)
 {
 	int ret = 0;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+	if (!host)
+		return 0;
 
 	if (host && !host->suspended)
 		return 0;
 
-	if (host) {
-		pm_runtime_get_sync(host->dev);
+	pm_runtime_get_sync(host->dev);
 
-		if (host->got_dbclk)
-			clk_enable(host->dbclk);
+	if (host->got_dbclk)
+		clk_enable(host->dbclk);
 
-		if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
-			omap_hsmmc_conf_bus_power(host);
+	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
+		omap_hsmmc_conf_bus_power(host);
 
-		if (host->pdata->resume) {
-			ret = host->pdata->resume(&pdev->dev, host->slot_id);
-			if (ret)
-				dev_dbg(mmc_dev(host->mmc),
-					"Unmask interrupt failed\n");
-		}
-
-		omap_hsmmc_protect_card(host);
-
-		/* Notify the core to resume the host */
-		ret = mmc_resume_host(host->mmc);
-		if (ret == 0)
-			host->suspended = 0;
-
-		pm_runtime_mark_last_busy(host->dev);
-		pm_runtime_put_autosuspend(host->dev);
+	if (host->pdata->resume) {
+		ret = host->pdata->resume(dev, host->slot_id);
+		if (ret)
+			dev_dbg(dev, "Unmask interrupt failed\n");
 	}
 
+	omap_hsmmc_protect_card(host);
+
+	/* Notify the core to resume the host */
+	ret = mmc_resume_host(host->mmc);
+	if (ret == 0)
+		host->suspended = 0;
+
+	pm_runtime_mark_last_busy(host->dev);
+	pm_runtime_put_autosuspend(host->dev);
+
 	return ret;
 
 }
@@ -2178,7 +2168,7 @@
 
 	host = platform_get_drvdata(to_platform_device(dev));
 	omap_hsmmc_context_save(host);
-	dev_dbg(mmc_dev(host->mmc), "disabled\n");
+	dev_dbg(dev, "disabled\n");
 
 	return 0;
 }
@@ -2189,7 +2179,7 @@
 
 	host = platform_get_drvdata(to_platform_device(dev));
 	omap_hsmmc_context_restore(host);
-	dev_dbg(mmc_dev(host->mmc), "enabled\n");
+	dev_dbg(dev, "enabled\n");
 
 	return 0;
 }
@@ -2202,7 +2192,8 @@
 };
 
 static struct platform_driver omap_hsmmc_driver = {
-	.remove		= omap_hsmmc_remove,
+	.probe		= omap_hsmmc_probe,
+	.remove		= __devexit_p(omap_hsmmc_remove),
 	.driver		= {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
@@ -2211,21 +2202,7 @@
 	},
 };
 
-static int __init omap_hsmmc_init(void)
-{
-	/* Register the MMC driver */
-	return platform_driver_probe(&omap_hsmmc_driver, omap_hsmmc_probe);
-}
-
-static void __exit omap_hsmmc_cleanup(void)
-{
-	/* Unregister MMC driver */
-	platform_driver_unregister(&omap_hsmmc_driver);
-}
-
-module_init(omap_hsmmc_init);
-module_exit(omap_hsmmc_cleanup);
-
+module_platform_driver(omap_hsmmc_driver);
 MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 46fd1fd..177f697 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -20,6 +20,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/module.h>
 #include <linux/mmc/host.h>
 
 #include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index fbbebe2..69ef0be 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1418,8 +1418,6 @@
 
 	slots = chip->num_slots;	/* Quirk may have changed this */
 
-	pci_enable_msi(pdev);
-
 	for (i = 0; i < slots; i++) {
 		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
 		if (IS_ERR(slot)) {
@@ -1438,8 +1436,6 @@
 	return 0;
 
 free:
-	pci_disable_msi(pdev);
-
 	pci_set_drvdata(pdev, NULL);
 	kfree(chip);
 
@@ -1462,8 +1458,6 @@
 		for (i = 0; i < chip->num_slots; i++)
 			sdhci_pci_remove_slot(chip->slots[i]);
 
-		pci_disable_msi(pdev);
-
 		pci_set_drvdata(pdev, NULL);
 		kfree(chip);
 	}
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index b19e7d4..55a164f 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -20,6 +20,10 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/mmc/host.h>
 
@@ -53,6 +57,18 @@
 	struct clk		*clk_bus[MAX_BUS_CLK];
 };
 
+/**
+ * struct sdhci_s3c_drv_data - S3C SDHCI platform-specific driver data
+ * @sdhci_quirks: sdhci host-specific quirks.
+ *
+ * Specifies platform-specific configuration of the sdhci controller.
+ * Note: a separate structure is used for driver-specific platform data
+ * to allow for future expansion of its usage.
+ */
+struct sdhci_s3c_drv_data {
+	unsigned int	sdhci_quirks;
+};
+
 static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
 {
 	return sdhci_priv(host);
@@ -132,10 +148,10 @@
 		return UINT_MAX;
 
 	/*
-	 * Clock divider's step is different as 1 from that of host controller
-	 * when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL.
+	 * If the controller uses non-standard clock division, find the best clock
+	 * speed possible with the selected clock source and skip the division.
 	 */
-	if (ourhost->pdata->clk_type) {
+	if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
 		rate = clk_round_rate(clksrc, wanted);
 		return wanted - rate;
 	}
@@ -272,6 +288,8 @@
 static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	struct sdhci_s3c *ourhost = to_s3c(host);
+	unsigned long timeout;
+	u16 clk = 0;
 
 	/* don't bother if the clock is going off */
 	if (clock == 0)
@@ -282,6 +300,25 @@
 	clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
 
 	host->clock = clock;
+
+	clk = SDHCI_CLOCK_INT_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+	/* Wait max 20 ms */
+	timeout = 20;
+	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+		& SDHCI_CLOCK_INT_STABLE)) {
+		if (timeout == 0) {
+			printk(KERN_ERR "%s: Internal clock never "
+				"stabilised.\n", mmc_hostname(host->mmc));
+			return;
+		}
+		timeout--;
+		mdelay(1);
+	}
+
+	clk |= SDHCI_CLOCK_CARD_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
 
 /**
@@ -382,16 +419,24 @@
 	}
 }
 
+static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
+			struct platform_device *pdev)
+{
+	return (struct sdhci_s3c_drv_data *)
+			platform_get_device_id(pdev)->driver_data;
+}
+
 static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
 {
-	struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
+	struct s3c_sdhci_platdata *pdata;
+	struct sdhci_s3c_drv_data *drv_data;
 	struct device *dev = &pdev->dev;
 	struct sdhci_host *host;
 	struct sdhci_s3c *sc;
 	struct resource *res;
 	int ret, irq, ptr, clks;
 
-	if (!pdata) {
+	if (!pdev->dev.platform_data) {
 		dev_err(dev, "no device data specified\n");
 		return -ENOENT;
 	}
@@ -402,18 +447,20 @@
 		return irq;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "no memory specified\n");
-		return -ENOENT;
-	}
-
 	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
 	if (IS_ERR(host)) {
 		dev_err(dev, "sdhci_alloc_host() failed\n");
 		return PTR_ERR(host);
 	}
 
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		ret = -ENOMEM;
+		goto err_io_clk;
+	}
+	memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+
+	drv_data = sdhci_s3c_get_driver_data(pdev);
 	sc = sdhci_priv(host);
 
 	sc->host = host;
@@ -464,15 +511,8 @@
 		goto err_no_busclks;
 	}
 
-	sc->ioarea = request_mem_region(res->start, resource_size(res),
-					mmc_hostname(host->mmc));
-	if (!sc->ioarea) {
-		dev_err(dev, "failed to reserve register area\n");
-		ret = -ENXIO;
-		goto err_req_regs;
-	}
-
-	host->ioaddr = ioremap_nocache(res->start, resource_size(res));
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	host->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
 	if (!host->ioaddr) {
 		dev_err(dev, "failed to map registers\n");
 		ret = -ENXIO;
@@ -491,6 +531,8 @@
 	/* Setup quirks for the controller */
 	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
 	host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+	if (drv_data)
+		host->quirks |= drv_data->sdhci_quirks;
 
 #ifndef CONFIG_MMC_SDHCI_S3C_DMA
 
@@ -518,6 +560,14 @@
 	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
 		host->mmc->caps = MMC_CAP_NONREMOVABLE;
 
+	switch (pdata->max_width) {
+	case 8:
+		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+	case 4:
+		host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+		break;
+	}
+
 	if (pdata->pm_caps)
 		host->mmc->pm_caps |= pdata->pm_caps;
 
@@ -531,7 +581,7 @@
 	 * If controller does not have internal clock divider,
 	 * we can use overriding functions instead of default.
 	 */
-	if (pdata->clk_type) {
+	if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
 		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
 		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
 		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
@@ -544,10 +594,17 @@
 	if (pdata->host_caps2)
 		host->mmc->caps2 |= pdata->host_caps2;
 
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_suspend_ignore_children(&pdev->dev, 1);
+
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(dev, "sdhci_add_host() failed\n");
-		goto err_add_host;
+		pm_runtime_forbid(&pdev->dev);
+		pm_runtime_get_noresume(&pdev->dev);
+		goto err_req_regs;
 	}
 
 	/* The following two methods of card detection might call
@@ -561,10 +618,6 @@
 
 	return 0;
 
- err_add_host:
-	release_resource(sc->ioarea);
-	kfree(sc->ioarea);
-
  err_req_regs:
 	for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
 		if (sc->clk_bus[ptr]) {
@@ -601,6 +654,8 @@
 
 	sdhci_remove_host(host, 1);
 
+	pm_runtime_disable(&pdev->dev);
+
 	for (ptr = 0; ptr < 3; ptr++) {
 		if (sc->clk_bus[ptr]) {
 			clk_disable(sc->clk_bus[ptr]);
@@ -610,18 +665,13 @@
 	clk_disable(sc->clk_io);
 	clk_put(sc->clk_io);
 
-	iounmap(host->ioaddr);
-	release_resource(sc->ioarea);
-	kfree(sc->ioarea);
-
 	sdhci_free_host(host);
 	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
 
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
 static int sdhci_s3c_suspend(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
@@ -635,10 +685,29 @@
 
 	return sdhci_resume_host(host);
 }
+#endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_s3c_runtime_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+
+	return sdhci_runtime_suspend_host(host);
+}
+
+static int sdhci_s3c_runtime_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+
+	return sdhci_runtime_resume_host(host);
+}
+#endif
+
+#ifdef CONFIG_PM
 static const struct dev_pm_ops sdhci_s3c_pmops = {
-	.suspend	= sdhci_s3c_suspend,
-	.resume		= sdhci_s3c_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
+	SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
+			   NULL)
 };
 
 #define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
@@ -647,9 +716,31 @@
 #define SDHCI_S3C_PMOPS NULL
 #endif
 
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+	.sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK,
+};
+#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
+#else
+#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
+#endif
+
+static struct platform_device_id sdhci_s3c_driver_ids[] = {
+	{
+		.name		= "s3c-sdhci",
+		.driver_data	= (kernel_ulong_t)NULL,
+	}, {
+		.name		= "exynos4-sdhci",
+		.driver_data	= EXYNOS4_SDHCI_DRV_DATA,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
+
 static struct platform_driver sdhci_s3c_driver = {
 	.probe		= sdhci_s3c_probe,
 	.remove		= __devexit_p(sdhci_s3c_remove),
+	.id_table	= sdhci_s3c_driver_ids,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "s3c-sdhci",
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8262cad..9aa77f3 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2782,8 +2782,9 @@
 	    mmc_card_is_removable(mmc))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-	/* UHS-I mode(s) supported by the host controller. */
-	if (host->version >= SDHCI_SPEC_300)
+	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
+	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+		       SDHCI_SUPPORT_DDR50))
 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
 
 	/* SDR104 support also implies SDR50 support */
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index aafaf0b..724b35e 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -454,7 +454,8 @@
 		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
 	else
 		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
-				((fls(host->clk / clk) - 1) << 16));
+				((fls(DIV_ROUND_UP(host->clk,
+						   clk) - 1) - 1) << 16));
 
 	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
 }
@@ -1297,14 +1298,8 @@
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
-	mmc->f_max = host->clk;
-	/* close to 400KHz */
-	if (mmc->f_max < 51200000)
-		mmc->f_min = mmc->f_max / 128;
-	else if (mmc->f_max < 102400000)
-		mmc->f_min = mmc->f_max / 256;
-	else
-		mmc->f_min = mmc->f_max / 512;
+	mmc->f_max = host->clk / 2;
+	mmc->f_min = host->clk / 512;
 	if (pd->ocr)
 		mmc->ocr_avail = pd->ocr;
 	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
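
The sh_mmcif clock setup now derives the divider field from DIV_ROUND_UP(host->clk, clk), so the programmed rate never exceeds the requested one; the old fls(host->clk / clk) - 1 form over-divided when the ratio was an exact power of two. A standalone sketch of the arithmetic, assuming (as the new f_max = clk/2 and f_min = clk/512 bounds suggest) that the register field selects a divisor of 2^(field+1):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Userspace stand-in for the kernel's fls(): position of the highest set
 * bit, counting from 1, with fls(0) == 0. */
static int fls_substitute(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int host_clk = 100000000;	/* controller input clock */
	unsigned int want = 25000000;		/* requested card clock   */
	int field;

	/* New formula: round the ratio up before taking log2. */
	field = fls_substitute(DIV_ROUND_UP(host_clk, want) - 1) - 1;
	printf("new: field=%d -> %u Hz\n", field, host_clk >> (field + 1));

	/* Old formula: fls(host_clk / want) - 1 gives field 2 here, i.e.
	 * 12.5 MHz, even though a /4 divider already satisfies 25 MHz. */
	field = fls_substitute(host_clk / want) - 1;
	printf("old: field=%d -> %u Hz\n", field, host_clk >> (field + 1));

	return 0;
}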
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 284cf34..5760c1a 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,9 +304,6 @@
 	  buffer in a flash partition where it can be read back at some
 	  later point.
 
-	  To use, add console=ttyMTDx to the kernel command line,
-	  where x is the MTD device number to use.
-
 config MTD_SWAP
 	tristate "Swap on MTD device support"
 	depends on MTD && SWAP
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9bcd1f4..dbbd2ed 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -87,7 +87,7 @@
 
 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
 		     size_t *retlen, void **virt, resource_size_t *phys);
-static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
 
 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
@@ -262,9 +262,9 @@
 static void fixup_use_point(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
-	if (!mtd->point && map_is_linear(map)) {
-		mtd->point   = cfi_intelext_point;
-		mtd->unpoint = cfi_intelext_unpoint;
+	if (!mtd->_point && map_is_linear(map)) {
+		mtd->_point   = cfi_intelext_point;
+		mtd->_unpoint = cfi_intelext_unpoint;
 	}
 }
 
@@ -274,8 +274,8 @@
 	struct cfi_private *cfi = map->fldrv_priv;
 	if (cfi->cfiq->BufWriteTimeoutTyp) {
 		printk(KERN_INFO "Using buffer write method\n" );
-		mtd->write = cfi_intelext_write_buffers;
-		mtd->writev = cfi_intelext_writev;
+		mtd->_write = cfi_intelext_write_buffers;
+		mtd->_writev = cfi_intelext_writev;
 	}
 }
 
@@ -443,15 +443,15 @@
 	mtd->type = MTD_NORFLASH;
 
 	/* Fill in the default mtd operations */
-	mtd->erase   = cfi_intelext_erase_varsize;
-	mtd->read    = cfi_intelext_read;
-	mtd->write   = cfi_intelext_write_words;
-	mtd->sync    = cfi_intelext_sync;
-	mtd->lock    = cfi_intelext_lock;
-	mtd->unlock  = cfi_intelext_unlock;
-	mtd->is_locked = cfi_intelext_is_locked;
-	mtd->suspend = cfi_intelext_suspend;
-	mtd->resume  = cfi_intelext_resume;
+	mtd->_erase   = cfi_intelext_erase_varsize;
+	mtd->_read    = cfi_intelext_read;
+	mtd->_write   = cfi_intelext_write_words;
+	mtd->_sync    = cfi_intelext_sync;
+	mtd->_lock    = cfi_intelext_lock;
+	mtd->_unlock  = cfi_intelext_unlock;
+	mtd->_is_locked = cfi_intelext_is_locked;
+	mtd->_suspend = cfi_intelext_suspend;
+	mtd->_resume  = cfi_intelext_resume;
 	mtd->flags   = MTD_CAP_NORFLASH;
 	mtd->name    = map->name;
 	mtd->writesize = 1;
@@ -600,12 +600,12 @@
 	}
 
 #ifdef CONFIG_MTD_OTP
-	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
-	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
-	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
-	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
-	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
-	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
+	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
+	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
+	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
+	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
+	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
+	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
 #endif
 
 	/* This function has the potential to distort the reality
@@ -1017,8 +1017,6 @@
 	case FL_READY:
 	case FL_STATUS:
 	case FL_JEDEC_QUERY:
-		/* We should really make set_vpp() count, rather than doing this */
-		DISABLE_VPP(map);
 		break;
 	default:
 		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
@@ -1324,7 +1322,7 @@
 	int chipnum;
 	int ret = 0;
 
-	if (!map->virt || (from + len > mtd->size))
+	if (!map->virt)
 		return -EINVAL;
 
 	/* Now lock the chip(s) to POINT state */
@@ -1334,7 +1332,6 @@
 	ofs = from - (chipnum << cfi->chipshift);
 
 	*virt = map->virt + cfi->chips[chipnum].start + ofs;
-	*retlen = 0;
 	if (phys)
 		*phys = map->phys + cfi->chips[chipnum].start + ofs;
 
@@ -1369,12 +1366,12 @@
 	return 0;
 }
 
-static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long ofs;
-	int chipnum;
+	int chipnum, err = 0;
 
 	/* Now unlock the chip(s) POINT state */
 
@@ -1382,7 +1379,7 @@
 	chipnum = (from >> cfi->chipshift);
 	ofs = from - (chipnum <<  cfi->chipshift);
 
-	while (len) {
+	while (len && !err) {
 		unsigned long thislen;
 		struct flchip *chip;
 
@@ -1400,8 +1397,10 @@
 			chip->ref_point_counter--;
 			if(chip->ref_point_counter == 0)
 				chip->state = FL_READY;
-		} else
-			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
+		} else {
+			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
+			err = -EINVAL;
+		}
 
 		put_chip(map, chip, chip->start);
 		mutex_unlock(&chip->mutex);
@@ -1410,6 +1409,8 @@
 		ofs = 0;
 		chipnum++;
 	}
+
+	return err;
 }
 
 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
@@ -1456,8 +1457,6 @@
 	chipnum = (from >> cfi->chipshift);
 	ofs = from - (chipnum <<  cfi->chipshift);
 
-	*retlen = 0;
-
 	while (len) {
 		unsigned long thislen;
 
@@ -1551,7 +1550,8 @@
 	}
 
 	xip_enable(map, chip, adr);
- out:	put_chip(map, chip, adr);
+ out:	DISABLE_VPP(map);
+	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);
 	return ret;
 }
@@ -1565,10 +1565,6 @@
 	int chipnum;
 	unsigned long ofs;
 
-	*retlen = 0;
-	if (!len)
-		return 0;
-
 	chipnum = to >> cfi->chipshift;
 	ofs = to  - (chipnum << cfi->chipshift);
 
@@ -1794,7 +1790,8 @@
 	}
 
 	xip_enable(map, chip, cmd_adr);
- out:	put_chip(map, chip, cmd_adr);
+ out:	DISABLE_VPP(map);
+	put_chip(map, chip, cmd_adr);
 	mutex_unlock(&chip->mutex);
 	return ret;
 }
@@ -1813,7 +1810,6 @@
 	for (i = 0; i < count; i++)
 		len += vecs[i].iov_len;
 
-	*retlen = 0;
 	if (!len)
 		return 0;
 
@@ -1932,6 +1928,7 @@
 			ret = -EIO;
 		} else if (chipstatus & 0x20 && retries--) {
 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+			DISABLE_VPP(map);
 			put_chip(map, chip, adr);
 			mutex_unlock(&chip->mutex);
 			goto retry;
@@ -1944,7 +1941,8 @@
 	}
 
 	xip_enable(map, chip, adr);
- out:	put_chip(map, chip, adr);
+ out:	DISABLE_VPP(map);
+	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);
 	return ret;
 }
@@ -2086,7 +2084,8 @@
 	}
 
 	xip_enable(map, chip, adr);
-out:	put_chip(map, chip, adr);
+ out:	DISABLE_VPP(map);
+	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);
 	return ret;
 }
@@ -2483,7 +2482,7 @@
 			   allowed to. Or should we return -EAGAIN, because the upper layers
 			   ought to have already shut down anything which was using the device
 			   anyway? The latter for now. */
-			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
+			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
 			ret = -EAGAIN;
 		case FL_PM_SUSPENDED:
 			break;
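
The cfi_cmdset_0001 changes rename the mtd operations to their underscore-prefixed forms (mtd->_read, mtd->_write, ...) and drop the per-driver "*retlen = 0;" and bounds checks; those checks now belong to the mtd core wrappers that call through the underscored pointers. A hedged standalone sketch of that wrapper idea (fake_mtd and its fields are illustrative, not the real struct mtd_info or mtdcore code):

#include <stddef.h>
#include <stdint.h>
#include <errno.h>

struct fake_mtd {
	uint64_t size;
	int (*_read)(struct fake_mtd *mtd, uint64_t from, size_t len,
		     size_t *retlen, unsigned char *buf);
};

/* Core-side wrapper: validate bounds and zero retlen once, then call the
 * chip driver's underscored op, which can stay free of such checks. */
static int fake_mtd_read(struct fake_mtd *mtd, uint64_t from, size_t len,
			 size_t *retlen, unsigned char *buf)
{
	*retlen = 0;
	if (from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_read(mtd, from, len, retlen, buf);
}

static int dummy_read(struct fake_mtd *mtd, uint64_t from, size_t len,
		      size_t *retlen, unsigned char *buf)
{
	(void)mtd; (void)from; (void)buf;
	*retlen = len;		/* pretend everything was read */
	return 0;
}

int main(void)
{
	struct fake_mtd mtd = { .size = 1024, ._read = dummy_read };
	unsigned char buf[16];
	size_t retlen;

	/* In-bounds read succeeds, out-of-bounds is rejected by the wrapper. */
	int ok  = fake_mtd_read(&mtd, 0, sizeof(buf), &retlen, buf);
	int bad = fake_mtd_read(&mtd, 2048, sizeof(buf), &retlen, buf);

	return (ok == 0 && bad == -EINVAL) ? 0 : 1;
}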
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 8d70895..d02592e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,6 +59,9 @@
 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+				  size_t *retlen, const u_char *buf);
+
 static void cfi_amdstd_destroy(struct mtd_info *);
 
 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
@@ -189,7 +192,7 @@
 	struct cfi_private *cfi = map->fldrv_priv;
 	if (cfi->cfiq->BufWriteTimeoutTyp) {
 		pr_debug("Using buffer write method\n" );
-		mtd->write = cfi_amdstd_write_buffers;
+		mtd->_write = cfi_amdstd_write_buffers;
 	}
 }
 
@@ -228,8 +231,8 @@
 static void fixup_use_secsi(struct mtd_info *mtd)
 {
 	/* Setup for chips with a secsi area */
-	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
-	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
+	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
+	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
 }
 
 static void fixup_use_erase_chip(struct mtd_info *mtd)
@@ -238,7 +241,7 @@
 	struct cfi_private *cfi = map->fldrv_priv;
 	if ((cfi->cfiq->NumEraseRegions == 1) &&
 		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
-		mtd->erase = cfi_amdstd_erase_chip;
+		mtd->_erase = cfi_amdstd_erase_chip;
 	}
 
 }
@@ -249,8 +252,8 @@
  */
 static void fixup_use_atmel_lock(struct mtd_info *mtd)
 {
-	mtd->lock = cfi_atmel_lock;
-	mtd->unlock = cfi_atmel_unlock;
+	mtd->_lock = cfi_atmel_lock;
+	mtd->_unlock = cfi_atmel_unlock;
 	mtd->flags |= MTD_POWERUP_LOCK;
 }
 
@@ -429,12 +432,12 @@
 	mtd->type = MTD_NORFLASH;
 
 	/* Fill in the default mtd operations */
-	mtd->erase   = cfi_amdstd_erase_varsize;
-	mtd->write   = cfi_amdstd_write_words;
-	mtd->read    = cfi_amdstd_read;
-	mtd->sync    = cfi_amdstd_sync;
-	mtd->suspend = cfi_amdstd_suspend;
-	mtd->resume  = cfi_amdstd_resume;
+	mtd->_erase   = cfi_amdstd_erase_varsize;
+	mtd->_write   = cfi_amdstd_write_words;
+	mtd->_read    = cfi_amdstd_read;
+	mtd->_sync    = cfi_amdstd_sync;
+	mtd->_suspend = cfi_amdstd_suspend;
+	mtd->_resume  = cfi_amdstd_resume;
 	mtd->flags   = MTD_CAP_NORFLASH;
 	mtd->name    = map->name;
 	mtd->writesize = 1;
@@ -443,6 +446,7 @@
 	pr_debug("MTD %s(): write buffer size %d\n", __func__,
 			mtd->writebufsize);
 
+	mtd->_panic_write = cfi_amdstd_panic_write;
 	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
 
 	if (cfi->cfi_mode==CFI_MODE_CFI){
@@ -770,8 +774,6 @@
 
 	case FL_READY:
 	case FL_STATUS:
-		/* We should really make set_vpp() count, rather than doing this */
-		DISABLE_VPP(map);
 		break;
 	default:
 		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
@@ -1013,13 +1015,9 @@
 	int ret = 0;
 
 	/* ofs: offset within the first chip that the first read should start */
-
 	chipnum = (from >> cfi->chipshift);
 	ofs = from - (chipnum <<  cfi->chipshift);
 
-
-	*retlen = 0;
-
 	while (len) {
 		unsigned long thislen;
 
@@ -1097,16 +1095,11 @@
 	int chipnum;
 	int ret = 0;
 
-
 	/* ofs: offset within the first chip that the first read should start */
-
 	/* 8 secsi bytes per chip */
 	chipnum=from>>3;
 	ofs=from & 7;
 
-
-	*retlen = 0;
-
 	while (len) {
 		unsigned long thislen;
 
@@ -1234,6 +1227,7 @@
 	xip_enable(map, chip, adr);
  op_done:
 	chip->state = FL_READY;
+	DISABLE_VPP(map);
 	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);
 
@@ -1251,10 +1245,6 @@
 	unsigned long ofs, chipstart;
 	DECLARE_WAITQUEUE(wait, current);
 
-	*retlen = 0;
-	if (!len)
-		return 0;
-
 	chipnum = to >> cfi->chipshift;
 	ofs = to  - (chipnum << cfi->chipshift);
 	chipstart = cfi->chips[chipnum].start;
@@ -1476,6 +1466,7 @@
 	ret = -EIO;
  op_done:
 	chip->state = FL_READY;
+	DISABLE_VPP(map);
 	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);
 
@@ -1493,10 +1484,6 @@
 	int chipnum;
 	unsigned long ofs;
 
-	*retlen = 0;
-	if (!len)
-		return 0;
-
 	chipnum = to >> cfi->chipshift;
 	ofs = to  - (chipnum << cfi->chipshift);
 
@@ -1562,6 +1549,238 @@
 	return 0;
 }
 
+/*
+ * Wait for the flash chip to become ready to write data
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ */
+static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
+				 unsigned long adr)
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	int retries = 10;
+	int i;
+
+	/*
+	 * If the driver thinks the chip is idle, and no toggle bits
+	 * are changing, then the chip is actually idle for sure.
+	 */
+	if (chip->state == FL_READY && chip_ready(map, adr))
+		return 0;
+
+	/*
+	 * Try several times to reset the chip and then wait for it
+	 * to become idle. The upper limit of a few milliseconds of
+	 * delay isn't a big problem: the kernel is dying anyway. It
+	 * is more important to save the messages.
+	 */
+	while (retries > 0) {
+		const unsigned long timeo = (HZ / 1000) + 1;
+
+		/* send the reset command */
+		map_write(map, CMD(0xF0), chip->start);
+
+		/* wait for the chip to become ready */
+		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
+			if (chip_ready(map, adr))
+				return 0;
+
+			udelay(1);
+		}
+	}
+
+	/* the chip never became ready */
+	return -EBUSY;
+}
+
+/*
+ * Write out one word of data to a single flash chip during a kernel panic
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ *
+ * The implementation of this routine is intentionally similar to
+ * do_write_oneword(), in order to ease code maintenance.
+ */
+static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
+				  unsigned long adr, map_word datum)
+{
+	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
+	struct cfi_private *cfi = map->fldrv_priv;
+	int retry_cnt = 0;
+	map_word oldd;
+	int ret = 0;
+	int i;
+
+	adr += chip->start;
+
+	ret = cfi_amdstd_panic_wait(map, chip, adr);
+	if (ret)
+		return ret;
+
+	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
+			__func__, adr, datum.x[0]);
+
+	/*
+	 * Check for a NOP for the case when the datum to write is already
+	 * present - it saves time and works around buggy chips that corrupt
+	 * data at other locations when 0xff is written to a location that
+	 * already contains 0xff.
+	 */
+	oldd = map_read(map, adr);
+	if (map_word_equal(map, oldd, datum)) {
+		pr_debug("MTD %s(): NOP\n", __func__);
+		goto op_done;
+	}
+
+	ENABLE_VPP(map);
+
+retry:
+	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+	map_write(map, datum, adr);
+
+	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
+		if (chip_ready(map, adr))
+			break;
+
+		udelay(1);
+	}
+
+	if (!chip_good(map, adr, datum)) {
+		/* reset on all failures. */
+		map_write(map, CMD(0xF0), chip->start);
+		/* FIXME - should have reset delay before continuing */
+
+		if (++retry_cnt <= MAX_WORD_RETRIES)
+			goto retry;
+
+		ret = -EIO;
+	}
+
+op_done:
+	DISABLE_VPP(map);
+	return ret;
+}
+
+/*
+ * Write out some data during a kernel panic
+ *
+ * This is used by the mtdoops driver to save the dying messages from a
+ * kernel which has panic'd.
+ *
+ * This routine ignores all of the locking used throughout the rest of the
+ * driver, in order to ensure that the data gets written out no matter what
+ * state this driver (and the flash chip itself) was in when the kernel crashed.
+ *
+ * The implementation of this routine is intentionally similar to
+ * cfi_amdstd_write_words(), in order to ease code maintenance.
+ */
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+				  size_t *retlen, const u_char *buf)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	unsigned long ofs, chipstart;
+	int ret = 0;
+	int chipnum;
+
+	chipnum = to >> cfi->chipshift;
+	ofs = to - (chipnum << cfi->chipshift);
+	chipstart = cfi->chips[chipnum].start;
+
+	/* If it's not bus aligned, do the first byte write */
+	if (ofs & (map_bankwidth(map) - 1)) {
+		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
+		int i = ofs - bus_ofs;
+		int n = 0;
+		map_word tmp_buf;
+
+		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
+		if (ret)
+			return ret;
+
+		/* Load 'tmp_buf' with old contents of flash */
+		tmp_buf = map_read(map, bus_ofs + chipstart);
+
+		/* Number of bytes to copy from buffer */
+		n = min_t(int, len, map_bankwidth(map) - i);
+
+		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+					     bus_ofs, tmp_buf);
+		if (ret)
+			return ret;
+
+		ofs += n;
+		buf += n;
+		(*retlen) += n;
+		len -= n;
+
+		if (ofs >> cfi->chipshift) {
+			chipnum++;
+			ofs = 0;
+			if (chipnum == cfi->numchips)
+				return 0;
+		}
+	}
+
+	/* We are now aligned, write as much as possible */
+	while (len >= map_bankwidth(map)) {
+		map_word datum;
+
+		datum = map_word_load(map, buf);
+
+		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+					     ofs, datum);
+		if (ret)
+			return ret;
+
+		ofs += map_bankwidth(map);
+		buf += map_bankwidth(map);
+		(*retlen) += map_bankwidth(map);
+		len -= map_bankwidth(map);
+
+		if (ofs >> cfi->chipshift) {
+			chipnum++;
+			ofs = 0;
+			if (chipnum == cfi->numchips)
+				return 0;
+
+			chipstart = cfi->chips[chipnum].start;
+		}
+	}
+
+	/* Write the trailing bytes if any */
+	if (len & (map_bankwidth(map) - 1)) {
+		map_word tmp_buf;
+
+		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
+		if (ret)
+			return ret;
+
+		tmp_buf = map_read(map, ofs + chipstart);
+
+		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+					     ofs, tmp_buf);
+		if (ret)
+			return ret;
+
+		(*retlen) += len;
+	}
+
+	return 0;
+}
+
 
 /*
  * Handle devices with one erase region, that only implement
@@ -1649,6 +1868,7 @@
 
 	chip->state = FL_READY;
 	xip_enable(map, chip, adr);
+	DISABLE_VPP(map);
 	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);
 
@@ -1739,6 +1959,7 @@
 	}
 
 	chip->state = FL_READY;
+	DISABLE_VPP(map);
 	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);
 	return ret;
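
The pattern repeated throughout these MTD hunks - drivers dropping their local *retlen = 0, zero-length and out-of-bounds checks, and filling in _-prefixed handlers instead of the old public method pointers - relies on the mtd core dispatch wrappers performing those checks once before calling into the driver. A minimal sketch of that wrapper shape, assuming the mtdcore helpers follow roughly this form (illustrative only, not the exact kernel code):

#include <linux/mtd/mtd.h>

/*
 * Illustrative sketch only: the real wrapper lives in drivers/mtd/mtdcore.c
 * and may differ in detail. It shows why the per-driver checks above could
 * be deleted and why drivers now assign mtd->_read instead of mtd->read.
 */
int sketch_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		    size_t *retlen, u_char *buf)
{
	*retlen = 0;					/* reset once for every driver */
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;				/* bounds checked once */
	if (!len)
		return 0;				/* zero-length handled once */
	return mtd->_read(mtd, from, len, retlen, buf);	/* dispatch to the driver op */
}
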
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 85e8018..096993f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -228,15 +228,15 @@
 		}
 
 	/* Also select the correct geometry setup too */
-	mtd->erase = cfi_staa_erase_varsize;
-	mtd->read = cfi_staa_read;
-        mtd->write = cfi_staa_write_buffers;
-	mtd->writev = cfi_staa_writev;
-	mtd->sync = cfi_staa_sync;
-	mtd->lock = cfi_staa_lock;
-	mtd->unlock = cfi_staa_unlock;
-	mtd->suspend = cfi_staa_suspend;
-	mtd->resume = cfi_staa_resume;
+	mtd->_erase = cfi_staa_erase_varsize;
+	mtd->_read = cfi_staa_read;
+	mtd->_write = cfi_staa_write_buffers;
+	mtd->_writev = cfi_staa_writev;
+	mtd->_sync = cfi_staa_sync;
+	mtd->_lock = cfi_staa_lock;
+	mtd->_unlock = cfi_staa_unlock;
+	mtd->_suspend = cfi_staa_suspend;
+	mtd->_resume = cfi_staa_resume;
 	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
 	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
 	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
@@ -394,8 +394,6 @@
 	chipnum = (from >> cfi->chipshift);
 	ofs = from - (chipnum <<  cfi->chipshift);
 
-	*retlen = 0;
-
 	while (len) {
 		unsigned long thislen;
 
@@ -617,10 +615,6 @@
 	int chipnum;
 	unsigned long ofs;
 
-	*retlen = 0;
-	if (!len)
-		return 0;
-
 	chipnum = to >> cfi->chipshift;
 	ofs = to  - (chipnum << cfi->chipshift);
 
@@ -904,12 +898,6 @@
 	int i, first;
 	struct mtd_erase_region_info *regions = mtd->eraseregions;
 
-	if (instr->addr > mtd->size)
-		return -EINVAL;
-
-	if ((instr->len + instr->addr) > mtd->size)
-		return -EINVAL;
-
 	/* Check that both start and end of the requested erase are
 	 * aligned with the erasesize at the appropriate addresses.
 	 */
@@ -1155,9 +1143,6 @@
 	if (len & (mtd->erasesize -1))
 		return -EINVAL;
 
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-
 	chipnum = ofs >> cfi->chipshift;
 	adr = ofs - (chipnum << cfi->chipshift);
 
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 8e46405..f992418f 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -173,12 +173,6 @@
 	int i, first;
 	struct mtd_erase_region_info *regions = mtd->eraseregions;
 
-	if (ofs > mtd->size)
-		return -EINVAL;
-
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-
 	/* Check that both start and end of the requested erase are
 	 * aligned with the erasesize at the appropriate addresses.
 	 */
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 89c6595..800b0e8 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -101,7 +101,7 @@
 {
 	printk(KERN_NOTICE "using fwh lock/unlock method\n");
 	/* Setup for the chips with the fwh lock method */
-	mtd->lock   = fwh_lock_varsize;
-	mtd->unlock = fwh_unlock_varsize;
+	mtd->_lock   = fwh_lock_varsize;
+	mtd->_unlock = fwh_unlock_varsize;
 }
 #endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index f2b8729..f7a5bca 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -55,10 +55,10 @@
 	mtd->name 	= map->name;
 	mtd->type 	= MTD_ABSENT;
 	mtd->size 	= map->size;
-	mtd->erase 	= map_absent_erase;
-	mtd->read 	= map_absent_read;
-	mtd->write 	= map_absent_write;
-	mtd->sync 	= map_absent_sync;
+	mtd->_erase 	= map_absent_erase;
+	mtd->_read 	= map_absent_read;
+	mtd->_write 	= map_absent_write;
+	mtd->_sync 	= map_absent_sync;
 	mtd->flags 	= 0;
 	mtd->erasesize  = PAGE_SIZE;
 	mtd->writesize  = 1;
@@ -70,13 +70,11 @@
 
 static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
 {
-	*retlen = 0;
 	return -ENODEV;
 }
 
 static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
 {
-	*retlen = 0;
 	return -ENODEV;
 }
 
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 67640cc..991c2a1 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -64,11 +64,11 @@
 	mtd->name = map->name;
 	mtd->type = MTD_RAM;
 	mtd->size = map->size;
-	mtd->erase = mapram_erase;
-	mtd->get_unmapped_area = mapram_unmapped_area;
-	mtd->read = mapram_read;
-	mtd->write = mapram_write;
-	mtd->sync = mapram_nop;
+	mtd->_erase = mapram_erase;
+	mtd->_get_unmapped_area = mapram_unmapped_area;
+	mtd->_read = mapram_read;
+	mtd->_write = mapram_write;
+	mtd->_sync = mapram_nop;
 	mtd->flags = MTD_CAP_RAM;
 	mtd->writesize = 1;
 
@@ -122,14 +122,10 @@
 	unsigned long i;
 
 	allff = map_word_ff(map);
-
 	for (i=0; i<instr->len; i += map_bankwidth(map))
 		map_write(map, allff, instr->addr + i);
-
 	instr->state = MTD_ERASE_DONE;
-
 	mtd_erase_callback(instr);
-
 	return 0;
 }
 
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 593f73d..47a43cf 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -41,11 +41,11 @@
 	mtd->name = map->name;
 	mtd->type = MTD_ROM;
 	mtd->size = map->size;
-	mtd->get_unmapped_area = maprom_unmapped_area;
-	mtd->read = maprom_read;
-	mtd->write = maprom_write;
-	mtd->sync = maprom_nop;
-	mtd->erase = maprom_erase;
+	mtd->_get_unmapped_area = maprom_unmapped_area;
+	mtd->_read = maprom_read;
+	mtd->_write = maprom_write;
+	mtd->_sync = maprom_nop;
+	mtd->_erase = maprom_erase;
 	mtd->flags = MTD_CAP_ROM;
 	mtd->erasesize = map->size;
 	mtd->writesize = 1;
@@ -85,8 +85,7 @@
 
 static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
 {
-	printk(KERN_NOTICE "maprom_write called\n");
-	return -EIO;
+	return -EROFS;
 }
 
 static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 8d3dac4..4cdb2af 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -103,6 +103,13 @@
 	help
 	  This option enables FAST_READ access supported by ST M25Pxx.
 
+config MTD_SPEAR_SMI
+	tristate "SPEAR MTD NOR Support through SMI controller"
+	depends on PLAT_SPEAR
+	default y
+	help
+	  This enables SNOR support on SPEAR platforms using the SMI controller.
+
 config MTD_SST25L
 	tristate "Support SST25L (non JEDEC) SPI Flash chips"
 	depends on SPI_MASTER
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 56c7cd4..a4dd1d8 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
+obj-$(CONFIG_MTD_SPEAR_SMI)	+= spear_smi.o
 obj-$(CONFIG_MTD_SST25L)	+= sst25l.o
 
 CFLAGS_docg3.o			+= -I$(src)
\ No newline at end of file
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e7e46d1..a4a80b7 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -104,14 +104,6 @@
 	int offset = from & (PAGE_SIZE-1);
 	int cpylen;
 
-	if (from > mtd->size)
-		return -EINVAL;
-	if (from + len > mtd->size)
-		len = mtd->size - from;
-
-	if (retlen)
-		*retlen = 0;
-
 	while (len) {
 		if ((offset + len) > PAGE_SIZE)
 			cpylen = PAGE_SIZE - offset;	// multiple pages
@@ -148,8 +140,6 @@
 	int offset = to & ~PAGE_MASK;	// page offset
 	int cpylen;
 
-	if (retlen)
-		*retlen = 0;
 	while (len) {
 		if ((offset+len) > PAGE_SIZE)
 			cpylen = PAGE_SIZE - offset;	// multiple pages
@@ -188,13 +178,6 @@
 	struct block2mtd_dev *dev = mtd->priv;
 	int err;
 
-	if (!len)
-		return 0;
-	if (to >= mtd->size)
-		return -ENOSPC;
-	if (to + len > mtd->size)
-		len = mtd->size - to;
-
 	mutex_lock(&dev->write_mutex);
 	err = _block2mtd_write(dev, buf, to, len, retlen);
 	mutex_unlock(&dev->write_mutex);
@@ -283,13 +266,14 @@
 	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
 	dev->mtd.erasesize = erase_size;
 	dev->mtd.writesize = 1;
+	dev->mtd.writebufsize = PAGE_SIZE;
 	dev->mtd.type = MTD_RAM;
 	dev->mtd.flags = MTD_CAP_RAM;
-	dev->mtd.erase = block2mtd_erase;
-	dev->mtd.write = block2mtd_write;
-	dev->mtd.writev = mtd_writev;
-	dev->mtd.sync = block2mtd_sync;
-	dev->mtd.read = block2mtd_read;
+	dev->mtd._erase = block2mtd_erase;
+	dev->mtd._write = block2mtd_write;
+	dev->mtd._writev = mtd_writev;
+	dev->mtd._sync = block2mtd_sync;
+	dev->mtd._read = block2mtd_read;
 	dev->mtd.priv = dev;
 	dev->mtd.owner = THIS_MODULE;
 
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index b1cdf64..a4eb8b5 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -562,14 +562,15 @@
 
 	mtd->type = MTD_NANDFLASH;
 	mtd->flags = MTD_CAP_NANDFLASH;
-	mtd->writesize = 512;
+	mtd->writebufsize = mtd->writesize = 512;
 	mtd->oobsize = 16;
+	mtd->ecc_strength = 2;
 	mtd->owner = THIS_MODULE;
-	mtd->erase = doc_erase;
-	mtd->read = doc_read;
-	mtd->write = doc_write;
-	mtd->read_oob = doc_read_oob;
-	mtd->write_oob = doc_write_oob;
+	mtd->_erase = doc_erase;
+	mtd->_read = doc_read;
+	mtd->_write = doc_write;
+	mtd->_read_oob = doc_read_oob;
+	mtd->_write_oob = doc_write_oob;
 	this->curfloor = -1;
 	this->curchip = -1;
 	mutex_init(&this->lock);
@@ -602,13 +603,7 @@
 	int i, len256 = 0, ret=0;
 	size_t left = len;
 
-	/* Don't allow read past end of device */
-	if (from >= this->totlen)
-		return -EINVAL;
-
 	mutex_lock(&this->lock);
-
-	*retlen = 0;
 	while (left) {
 		len = left;
 
@@ -748,13 +743,7 @@
 	size_t left = len;
 	int status;
 
-	/* Don't allow write past end of device */
-	if (to >= this->totlen)
-		return -EINVAL;
-
 	mutex_lock(&this->lock);
-
-	*retlen = 0;
 	while (left) {
 		len = left;
 
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 7543b98..f692795 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -346,14 +346,15 @@
 
 	/* FIXME: erase size is not always 8KiB */
 	mtd->erasesize = 0x2000;
-	mtd->writesize = 512;
+	mtd->writebufsize = mtd->writesize = 512;
 	mtd->oobsize = 16;
+	mtd->ecc_strength = 2;
 	mtd->owner = THIS_MODULE;
-	mtd->erase = doc_erase;
-	mtd->read = doc_read;
-	mtd->write = doc_write;
-	mtd->read_oob = doc_read_oob;
-	mtd->write_oob = doc_write_oob;
+	mtd->_erase = doc_erase;
+	mtd->_read = doc_read;
+	mtd->_write = doc_write;
+	mtd->_read_oob = doc_read_oob;
+	mtd->_write_oob = doc_write_oob;
 	this->curfloor = -1;
 	this->curchip = -1;
 
@@ -383,10 +384,6 @@
 	void __iomem *docptr = this->virtadr;
 	struct Nand *mychip = &this->chips[from >> (this->chipshift)];
 
-	/* Don't allow read past end of device */
-	if (from >= this->totlen)
-		return -EINVAL;
-
 	/* Don't allow a single read to cross a 512-byte block boundary */
 	if (from + len > ((from | 0x1ff) + 1))
 		len = ((from | 0x1ff) + 1) - from;
@@ -494,10 +491,6 @@
 	void __iomem *docptr = this->virtadr;
 	struct Nand *mychip = &this->chips[to >> (this->chipshift)];
 
-	/* Don't allow write past end of device */
-	if (to >= this->totlen)
-		return -EINVAL;
-
 #if 0
 	/* Don't allow a single write to cross a 512-byte block boundary */
 	if (to + len > ( (to | 0x1ff) + 1))
@@ -599,7 +592,6 @@
 		printk("Error programming flash\n");
 		/* Error in programming
 		   FIXME: implement Bad Block Replacement (in nftl.c ??) */
-		*retlen = 0;
 		ret = -EIO;
 	}
 	dummy = ReadDOC(docptr, LastDataRead);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 177510d..04eb2e4 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -467,14 +467,15 @@
 
 	mtd->type = MTD_NANDFLASH;
 	mtd->flags = MTD_CAP_NANDFLASH;
-	mtd->writesize = 512;
+	mtd->writebufsize = mtd->writesize = 512;
 	mtd->oobsize = 16;
+	mtd->ecc_strength = 2;
 	mtd->owner = THIS_MODULE;
-	mtd->erase = doc_erase;
-	mtd->read = doc_read;
-	mtd->write = doc_write;
-	mtd->read_oob = doc_read_oob;
-	mtd->write_oob = doc_write_oob;
+	mtd->_erase = doc_erase;
+	mtd->_read = doc_read;
+	mtd->_write = doc_write;
+	mtd->_read_oob = doc_read_oob;
+	mtd->_write_oob = doc_write_oob;
 	this->curfloor = -1;
 	this->curchip = -1;
 
@@ -581,10 +582,6 @@
 	void __iomem * docptr = this->virtadr;
 	struct Nand *mychip = &this->chips[from >> (this->chipshift)];
 
-	/* Don't allow read past end of device */
-	if (from >= this->totlen)
-		return -EINVAL;
-
 	/* Don't allow a single read to cross a 512-byte block boundary */
 	if (from + len > ((from | 0x1ff) + 1))
 		len = ((from | 0x1ff) + 1) - from;
@@ -700,10 +697,6 @@
 	void __iomem * docptr = this->virtadr;
 	struct Nand *mychip = &this->chips[to >> (this->chipshift)];
 
-	/* Don't allow write past end of device */
-	if (to >= this->totlen)
-		return -EINVAL;
-
 	/* Don't allow writes which aren't exactly one block (512 bytes) */
 	if ((to & 0x1ff) || (len != 0x200))
 		return -EINVAL;
@@ -800,7 +793,6 @@
 		printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
 		/* Error in programming
 		   FIXME: implement Bad Block Replacement (in nftl.c ??) */
-		*retlen = 0;
 		ret = -EIO;
 	}
 	dummy = ReadDOC(docptr, Mplus_LastDataRead);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index ad11ef0a..8272c02 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -80,14 +80,9 @@
 	.oobavail = 8,
 };
 
-/**
- * struct docg3_bch - BCH engine
- */
-static struct bch_control *docg3_bch;
-
 static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
 {
-	u8 val = readb(docg3->base + reg);
+	u8 val = readb(docg3->cascade->base + reg);
 
 	trace_docg3_io(0, 8, reg, (int)val);
 	return val;
@@ -95,7 +90,7 @@
 
 static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
 {
-	u16 val = readw(docg3->base + reg);
+	u16 val = readw(docg3->cascade->base + reg);
 
 	trace_docg3_io(0, 16, reg, (int)val);
 	return val;
@@ -103,13 +98,13 @@
 
 static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
 {
-	writeb(val, docg3->base + reg);
+	writeb(val, docg3->cascade->base + reg);
 	trace_docg3_io(1, 8, reg, val);
 }
 
 static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
 {
-	writew(val, docg3->base + reg);
+	writew(val, docg3->cascade->base + reg);
 	trace_docg3_io(1, 16, reg, val);
 }
 
@@ -643,7 +638,8 @@
 
 	for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
 		ecc[i] = bitrev8(hwecc[i]);
-	numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+	numerrs = decode_bch(docg3->cascade->bch, NULL,
+			     DOC_ECC_BCH_COVERED_BYTES,
 			     NULL, ecc, NULL, errorpos);
 	BUG_ON(numerrs == -EINVAL);
 	if (numerrs < 0)
@@ -734,7 +730,7 @@
  * doc_read_page_getbytes - Reads bytes from a prepared page
  * @docg3: the device
  * @len: the number of bytes to be read (must be a multiple of 4)
- * @buf: the buffer to be filled in
+ * @buf: the buffer to be filled in (or NULL to discard the bytes)
  * @first: 1 if first time read, DOC_READADDRESS should be set
  *
  */
@@ -849,7 +845,7 @@
 			struct mtd_oob_ops *ops)
 {
 	struct docg3 *docg3 = mtd->priv;
-	int block0, block1, page, ret, ofs = 0;
+	int block0, block1, page, ret, skip, ofs = 0;
 	u8 *oobbuf = ops->oobbuf;
 	u8 *buf = ops->datbuf;
 	size_t len, ooblen, nbdata, nboob;
@@ -869,34 +865,36 @@
 
 	doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
 		from, ops->mode, buf, len, oobbuf, ooblen);
-	if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
-	    (from % DOC_LAYOUT_PAGE_SIZE))
+	if (ooblen % DOC_LAYOUT_OOB_SIZE)
 		return -EINVAL;
 
-	ret = -EINVAL;
-	calc_block_sector(from + len, &block0, &block1, &page, &ofs,
-			  docg3->reliable);
-	if (block1 > docg3->max_block)
-		goto err;
+	if (from + len > mtd->size)
+		return -EINVAL;
 
 	ops->oobretlen = 0;
 	ops->retlen = 0;
 	ret = 0;
+	skip = from % DOC_LAYOUT_PAGE_SIZE;
+	mutex_lock(&docg3->cascade->lock);
 	while (!ret && (len > 0 || ooblen > 0)) {
-		calc_block_sector(from, &block0, &block1, &page, &ofs,
+		calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
 			docg3->reliable);
-		nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+		nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
 		nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
 		ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
 		if (ret < 0)
-			goto err;
+			goto out;
 		ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
 		if (ret < 0)
 			goto err_in_read;
-		ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+		ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+		if (ret < skip)
+			goto err_in_read;
+		ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
 		if (ret < nbdata)
 			goto err_in_read;
-		doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+		doc_read_page_getbytes(docg3,
+				       DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
 				       NULL, 0);
 		ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
 		if (ret < nboob)
@@ -950,13 +948,15 @@
 		len -= nbdata;
 		ooblen -= nboob;
 		from += DOC_LAYOUT_PAGE_SIZE;
+		skip = 0;
 	}
 
+out:
+	mutex_unlock(&docg3->cascade->lock);
 	return ret;
 err_in_read:
 	doc_read_page_finish(docg3);
-err:
-	return ret;
+	goto out;
 }
 
 /**
@@ -1114,10 +1114,10 @@
  */
 static int doc_write_erase_wait_status(struct docg3 *docg3)
 {
-	int status, ret = 0;
+	int i, status, ret = 0;
 
-	if (!doc_is_ready(docg3))
-		usleep_range(3000, 3000);
+	for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
+		msleep(20);
 	if (!doc_is_ready(docg3)) {
 		doc_dbg("Timeout reached and the chip is still not ready\n");
 		ret = -EAGAIN;
@@ -1196,18 +1196,19 @@
 	int block0, block1, page, ret, ofs = 0;
 
 	doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
-	doc_set_device_id(docg3, docg3->device_id);
 
 	info->state = MTD_ERASE_PENDING;
 	calc_block_sector(info->addr + info->len, &block0, &block1, &page,
 			  &ofs, docg3->reliable);
 	ret = -EINVAL;
-	if (block1 > docg3->max_block || page || ofs)
+	if (info->addr + info->len > mtd->size || page || ofs)
 		goto reset_err;
 
 	ret = 0;
 	calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
 			  docg3->reliable);
+	mutex_lock(&docg3->cascade->lock);
+	doc_set_device_id(docg3, docg3->device_id);
 	doc_set_reliable_mode(docg3);
 	for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
 		info->state = MTD_ERASING;
@@ -1215,6 +1216,7 @@
 		block0 += 2;
 		block1 += 2;
 	}
+	mutex_unlock(&docg3->cascade->lock);
 
 	if (ret)
 		goto reset_err;
@@ -1401,7 +1403,7 @@
 			 struct mtd_oob_ops *ops)
 {
 	struct docg3 *docg3 = mtd->priv;
-	int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+	int ret, autoecc, oobdelta;
 	u8 *oobbuf = ops->oobbuf;
 	u8 *buf = ops->datbuf;
 	size_t len, ooblen;
@@ -1438,12 +1440,8 @@
 	if (len && ooblen &&
 	    (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
 		return -EINVAL;
-
-	ret = -EINVAL;
-	calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
-			  docg3->reliable);
-	if (block1 > docg3->max_block)
-		goto err;
+	if (ofs + len > mtd->size)
+		return -EINVAL;
 
 	ops->oobretlen = 0;
 	ops->retlen = 0;
@@ -1457,6 +1455,7 @@
 	if (autoecc < 0)
 		return autoecc;
 
+	mutex_lock(&docg3->cascade->lock);
 	while (!ret && len > 0) {
 		memset(oob, 0, sizeof(oob));
 		if (ofs == docg3->oob_write_ofs)
@@ -1477,8 +1476,9 @@
 		}
 		ops->retlen += DOC_LAYOUT_PAGE_SIZE;
 	}
-err:
+
 	doc_set_device_id(docg3, 0);
+	mutex_unlock(&docg3->cascade->lock);
 	return ret;
 }
 
@@ -1535,9 +1535,11 @@
 	struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
 	int dps0;
 
+	mutex_lock(&docg3->cascade->lock);
 	doc_set_device_id(docg3, docg3->device_id);
 	dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
 	doc_set_device_id(docg3, 0);
+	mutex_unlock(&docg3->cascade->lock);
 
 	return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
 }
@@ -1548,9 +1550,11 @@
 	struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
 	int dps1;
 
+	mutex_lock(&docg3->cascade->lock);
 	doc_set_device_id(docg3, docg3->device_id);
 	dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
 	doc_set_device_id(docg3, 0);
+	mutex_unlock(&docg3->cascade->lock);
 
 	return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
 }
@@ -1565,10 +1569,12 @@
 	if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
 		return -EINVAL;
 
+	mutex_lock(&docg3->cascade->lock);
 	doc_set_device_id(docg3, docg3->device_id);
 	for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
 		doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
 	doc_set_device_id(docg3, 0);
+	mutex_unlock(&docg3->cascade->lock);
 	return count;
 }
 
@@ -1582,10 +1588,12 @@
 	if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
 		return -EINVAL;
 
+	mutex_lock(&docg3->cascade->lock);
 	doc_set_device_id(docg3, docg3->device_id);
 	for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
 		doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
 	doc_set_device_id(docg3, 0);
+	mutex_unlock(&docg3->cascade->lock);
 	return count;
 }
 
@@ -1601,13 +1609,13 @@
 };
 
 static int doc_register_sysfs(struct platform_device *pdev,
-			      struct mtd_info **floors)
+			      struct docg3_cascade *cascade)
 {
 	int ret = 0, floor, i = 0;
 	struct device *dev = &pdev->dev;
 
-	for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
-	     floor++)
+	for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
+		     cascade->floors[floor]; floor++)
 		for (i = 0; !ret && i < 4; i++)
 			ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
 	if (!ret)
@@ -1621,12 +1629,12 @@
 }
 
 static void doc_unregister_sysfs(struct platform_device *pdev,
-				 struct mtd_info **floors)
+				 struct docg3_cascade *cascade)
 {
 	struct device *dev = &pdev->dev;
 	int floor, i;
 
-	for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+	for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
 	     floor++)
 		for (i = 0; i < 4; i++)
 			device_remove_file(dev, &doc_sys_attrs[floor][i]);
@@ -1640,7 +1648,11 @@
 	struct docg3 *docg3 = (struct docg3 *)s->private;
 
 	int pos = 0;
-	u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+	u8 fctrl;
+
+	mutex_lock(&docg3->cascade->lock);
+	fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+	mutex_unlock(&docg3->cascade->lock);
 
 	pos += seq_printf(s,
 		 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
@@ -1658,9 +1670,12 @@
 {
 	struct docg3 *docg3 = (struct docg3 *)s->private;
 
-	int pos = 0;
-	int pctrl = doc_register_readb(docg3, DOC_ASICMODE);
-	int mode = pctrl & 0x03;
+	int pos = 0, pctrl, mode;
+
+	mutex_lock(&docg3->cascade->lock);
+	pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+	mode = pctrl & 0x03;
+	mutex_unlock(&docg3->cascade->lock);
 
 	pos += seq_printf(s,
 			 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
@@ -1692,7 +1707,11 @@
 {
 	struct docg3 *docg3 = (struct docg3 *)s->private;
 	int pos = 0;
-	int id = doc_register_readb(docg3, DOC_DEVICESELECT);
+	int id;
+
+	mutex_lock(&docg3->cascade->lock);
+	id = doc_register_readb(docg3, DOC_DEVICESELECT);
+	mutex_unlock(&docg3->cascade->lock);
 
 	pos += seq_printf(s, "DeviceId = %d\n", id);
 	return pos;
@@ -1705,6 +1724,7 @@
 	int pos = 0;
 	int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
 
+	mutex_lock(&docg3->cascade->lock);
 	protect = doc_register_readb(docg3, DOC_PROTECTION);
 	dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
 	dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
@@ -1712,6 +1732,7 @@
 	dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
 	dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
 	dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
+	mutex_unlock(&docg3->cascade->lock);
 
 	pos += seq_printf(s, "Protection = 0x%02x (",
 			 protect);
@@ -1804,7 +1825,7 @@
 
 	switch (chip_id) {
 	case DOC_CHIPID_G3:
-		mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+		mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
 				      docg3->device_id);
 		docg3->max_block = 2047;
 		break;
@@ -1817,16 +1838,17 @@
 	mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
 	if (docg3->reliable == 2)
 		mtd->erasesize /= 2;
-	mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+	mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
 	mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
 	mtd->owner = THIS_MODULE;
-	mtd->erase = doc_erase;
-	mtd->read = doc_read;
-	mtd->write = doc_write;
-	mtd->read_oob = doc_read_oob;
-	mtd->write_oob = doc_write_oob;
-	mtd->block_isbad = doc_block_isbad;
+	mtd->_erase = doc_erase;
+	mtd->_read = doc_read;
+	mtd->_write = doc_write;
+	mtd->_read_oob = doc_read_oob;
+	mtd->_write_oob = doc_write_oob;
+	mtd->_block_isbad = doc_block_isbad;
 	mtd->ecclayout = &docg3_oobinfo;
+	mtd->ecc_strength = DOC_ECC_BCH_T;
 }
 
 /**
@@ -1834,6 +1856,7 @@
  * @base: the io space where the device is probed
  * @floor: the floor of the probed device
  * @dev: the device
+ * @cascade: the cascade of chips this device will belong to
  *
  * Checks whether a device at the specified IO range, and floor is available.
  *
@@ -1841,8 +1864,8 @@
  * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is
  * launched.
  */
-static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
-					 struct device *dev)
+static struct mtd_info * __init
+doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
 {
 	int ret, bbt_nbpages;
 	u16 chip_id, chip_id_inv;
@@ -1865,7 +1888,7 @@
 
 	docg3->dev = dev;
 	docg3->device_id = floor;
-	docg3->base = base;
+	docg3->cascade = cascade;
 	doc_set_device_id(docg3, docg3->device_id);
 	if (!floor)
 		doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
@@ -1882,7 +1905,7 @@
 	switch (chip_id) {
 	case DOC_CHIPID_G3:
 		doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
-			 base, floor);
+			 docg3->cascade->base, floor);
 		break;
 	default:
 		doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
@@ -1927,10 +1950,12 @@
 static int docg3_resume(struct platform_device *pdev)
 {
 	int i;
+	struct docg3_cascade *cascade;
 	struct mtd_info **docg3_floors, *mtd;
 	struct docg3 *docg3;
 
-	docg3_floors = platform_get_drvdata(pdev);
+	cascade = platform_get_drvdata(pdev);
+	docg3_floors = cascade->floors;
 	mtd = docg3_floors[0];
 	docg3 = mtd->priv;
 
@@ -1952,11 +1977,13 @@
 static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	int floor, i;
+	struct docg3_cascade *cascade;
 	struct mtd_info **docg3_floors, *mtd;
 	struct docg3 *docg3;
 	u8 ctrl, pwr_down;
 
-	docg3_floors = platform_get_drvdata(pdev);
+	cascade = platform_get_drvdata(pdev);
+	docg3_floors = cascade->floors;
 	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
 		mtd = docg3_floors[floor];
 		if (!mtd)
@@ -2006,7 +2033,7 @@
 	struct resource *ress;
 	void __iomem *base;
 	int ret, floor, found = 0;
-	struct mtd_info **docg3_floors;
+	struct docg3_cascade *cascade;
 
 	ret = -ENXIO;
 	ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2017,17 +2044,19 @@
 	base = ioremap(ress->start, DOC_IOSPACE_SIZE);
 
 	ret = -ENOMEM;
-	docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
-			       GFP_KERNEL);
-	if (!docg3_floors)
+	cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
+			  GFP_KERNEL);
+	if (!cascade)
 		goto nomem1;
-	docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+	cascade->base = base;
+	mutex_init(&cascade->lock);
+	cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
 			     DOC_ECC_BCH_PRIMPOLY);
-	if (!docg3_bch)
+	if (!cascade->bch)
 		goto nomem2;
 
 	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
-		mtd = doc_probe_device(base, floor, dev);
+		mtd = doc_probe_device(cascade, floor, dev);
 		if (IS_ERR(mtd)) {
 			ret = PTR_ERR(mtd);
 			goto err_probe;
@@ -2038,7 +2067,7 @@
 			else
 				continue;
 		}
-		docg3_floors[floor] = mtd;
+		cascade->floors[floor] = mtd;
 		ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
 						0);
 		if (ret)
@@ -2046,26 +2075,26 @@
 		found++;
 	}
 
-	ret = doc_register_sysfs(pdev, docg3_floors);
+	ret = doc_register_sysfs(pdev, cascade);
 	if (ret)
 		goto err_probe;
 	if (!found)
 		goto notfound;
 
-	platform_set_drvdata(pdev, docg3_floors);
-	doc_dbg_register(docg3_floors[0]->priv);
+	platform_set_drvdata(pdev, cascade);
+	doc_dbg_register(cascade->floors[0]->priv);
 	return 0;
 
 notfound:
 	ret = -ENODEV;
 	dev_info(dev, "No supported DiskOnChip found\n");
 err_probe:
-	free_bch(docg3_bch);
+	kfree(cascade->bch);
 	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
-		if (docg3_floors[floor])
-			doc_release_device(docg3_floors[floor]);
+		if (cascade->floors[floor])
+			doc_release_device(cascade->floors[floor]);
 nomem2:
-	kfree(docg3_floors);
+	kfree(cascade);
 nomem1:
 	iounmap(base);
 noress:
@@ -2080,19 +2109,19 @@
  */
 static int __exit docg3_release(struct platform_device *pdev)
 {
-	struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
-	struct docg3 *docg3 = docg3_floors[0]->priv;
-	void __iomem *base = docg3->base;
+	struct docg3_cascade *cascade = platform_get_drvdata(pdev);
+	struct docg3 *docg3 = cascade->floors[0]->priv;
+	void __iomem *base = cascade->base;
 	int floor;
 
-	doc_unregister_sysfs(pdev, docg3_floors);
+	doc_unregister_sysfs(pdev, cascade);
 	doc_dbg_unregister(docg3);
 	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
-		if (docg3_floors[floor])
-			doc_release_device(docg3_floors[floor]);
+		if (cascade->floors[floor])
+			doc_release_device(cascade->floors[floor]);
 
-	kfree(docg3_floors);
-	free_bch(docg3_bch);
+	free_bch(docg3->cascade->bch);
+	kfree(cascade);
 	iounmap(base);
 	return 0;
 }
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index db0da43..19fb93f 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -22,6 +22,8 @@
 #ifndef _MTD_DOCG3_H
 #define _MTD_DOCG3_H
 
+#include <linux/mtd/mtd.h>
+
 /*
  * Flash memory areas :
  *   - 0x0000 .. 0x07ff : IPL
@@ -267,9 +269,23 @@
 #define DOC_LAYOUT_DPS_KEY_LENGTH	8
 
 /**
+ * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
+ * @floors: floors (ie. one physical docg3 chip is one floor)
+ * @base: IO space to access all chips in the cascade
+ * @bch: the BCH correcting control structure
+ * @lock: lock to protect docg3 IO space from concurrent accesses
+ */
+struct docg3_cascade {
+	struct mtd_info *floors[DOC_MAX_NBFLOORS];
+	void __iomem *base;
+	struct bch_control *bch;
+	struct mutex lock;
+};
+
+/**
  * struct docg3 - DiskOnChip driver private data
  * @dev: the device currently under control
- * @base: mapped IO space
+ * @cascade: the cascade this device belongs to
  * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
  * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
 
@@ -287,7 +303,7 @@
  */
 struct docg3 {
 	struct device *dev;
-	void __iomem *base;
+	struct docg3_cascade *cascade;
 	unsigned int device_id:4;
 	unsigned int if_cfg:1;
 	unsigned int reliable:2;
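
The docg3 hunks above move the IO base and BCH state into a shared struct docg3_cascade and add a per-cascade mutex; every register access sequence in the driver is now bracketed by that lock and by floor selection. The shape of the pattern, mirroring the sysfs hunks above (the function name here is made up for illustration):

#include <linux/mutex.h>

/* Illustrative only: shows the lock/select/access/deselect/unlock bracket
 * that the hunks above add around each register access sequence. */
static int sketch_read_dps0(struct docg3 *docg3)
{
	int dps0;

	mutex_lock(&docg3->cascade->lock);
	doc_set_device_id(docg3, docg3->device_id);	/* select this floor */
	dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
	doc_set_device_id(docg3, 0);			/* deselect again */
	mutex_unlock(&docg3->cascade->lock);

	return dps0;
}
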
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 3a11ea6..82bd00a 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -367,9 +367,6 @@
    printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
 #endif
 
-   /* sanity checks */
-   if (instr->addr + instr->len > mtd->size) return (-EINVAL);
-
    /*
 	* check that both start and end of the requested erase are
 	* aligned with the erasesize at the appropriate addresses.
@@ -440,10 +437,6 @@
    printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
 #endif
 
-   /* sanity checks */
-   if (!len) return (0);
-   if (from + len > mtd->size) return (-EINVAL);
-
    /* we always read len bytes */
    *retlen = len;
 
@@ -522,11 +515,8 @@
    printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
 #endif
 
-   *retlen = 0;
-
    /* sanity checks */
    if (!len) return (0);
-   if (to + len > mtd->size) return (-EINVAL);
 
    /* first, we write a 0xFF.... padded byte until we reach a dword boundary */
    if (to & (BUSWIDTH - 1))
@@ -630,14 +620,15 @@
    mtd.name = module_name;
    mtd.type = MTD_NORFLASH;
    mtd.writesize = 1;
+   mtd.writebufsize = 4;
    mtd.flags = MTD_CAP_NORFLASH;
    mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
    mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
    mtd.numeraseregions = ARRAY_SIZE(erase_regions);
    mtd.eraseregions = erase_regions;
-   mtd.erase = flash_erase;
-   mtd.read = flash_read;
-   mtd.write = flash_write;
+   mtd._erase = flash_erase;
+   mtd._read = flash_read;
+   mtd._write = flash_write;
    mtd.owner = THIS_MODULE;
 
 #ifdef LART_DEBUG
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 7c60ddd..1924d24 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -288,9 +288,6 @@
 			__func__, (long long)instr->addr,
 			(long long)instr->len);
 
-	/* sanity checks */
-	if (instr->addr + instr->len > flash->mtd.size)
-		return -EINVAL;
 	div_u64_rem(instr->len, mtd->erasesize, &rem);
 	if (rem)
 		return -EINVAL;
@@ -349,13 +346,6 @@
 	pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
 			__func__, (u32)from, len);
 
-	/* sanity checks */
-	if (!len)
-		return 0;
-
-	if (from + len > flash->mtd.size)
-		return -EINVAL;
-
 	spi_message_init(&m);
 	memset(t, 0, (sizeof t));
 
@@ -371,9 +361,6 @@
 	t[1].len = len;
 	spi_message_add_tail(&t[1], &m);
 
-	/* Byte count starts at zero. */
-	*retlen = 0;
-
 	mutex_lock(&flash->lock);
 
 	/* Wait till previous write/erase is done. */
@@ -417,15 +404,6 @@
 	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
 			__func__, (u32)to, len);
 
-	*retlen = 0;
-
-	/* sanity checks */
-	if (!len)
-		return(0);
-
-	if (to + len > flash->mtd.size)
-		return -EINVAL;
-
 	spi_message_init(&m);
 	memset(t, 0, (sizeof t));
 
@@ -509,15 +487,6 @@
 	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
 			__func__, (u32)to, len);
 
-	*retlen = 0;
-
-	/* sanity checks */
-	if (!len)
-		return 0;
-
-	if (to + len > flash->mtd.size)
-		return -EINVAL;
-
 	spi_message_init(&m);
 	memset(t, 0, (sizeof t));
 
@@ -908,14 +877,14 @@
 	flash->mtd.writesize = 1;
 	flash->mtd.flags = MTD_CAP_NORFLASH;
 	flash->mtd.size = info->sector_size * info->n_sectors;
-	flash->mtd.erase = m25p80_erase;
-	flash->mtd.read = m25p80_read;
+	flash->mtd._erase = m25p80_erase;
+	flash->mtd._read = m25p80_read;
 
 	/* sst flash chips use AAI word program */
 	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
-		flash->mtd.write = sst_write;
+		flash->mtd._write = sst_write;
 	else
-		flash->mtd.write = m25p80_write;
+		flash->mtd._write = m25p80_write;
 
 	/* prefer "small sector" erase if possible */
 	if (info->flags & SECT_4K) {
@@ -932,6 +901,7 @@
 	ppdata.of_node = spi->dev.of_node;
 	flash->mtd.dev.parent = &spi->dev;
 	flash->page_size = info->page_size;
+	flash->mtd.writebufsize = flash->page_size;
 
 	if (info->addr_width)
 		flash->addr_width = info->addr_width;
@@ -1004,21 +974,7 @@
 	 */
 };
 
-
-static int __init m25p80_init(void)
-{
-	return spi_register_driver(&m25p80_driver);
-}
-
-
-static void __exit m25p80_exit(void)
-{
-	spi_unregister_driver(&m25p80_driver);
-}
-
-
-module_init(m25p80_init);
-module_exit(m25p80_exit);
+module_spi_driver(m25p80_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mike Lavender");
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 8423fb6..182849d 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -59,12 +59,8 @@
 {
 	struct ms02nv_private *mp = mtd->priv;
 
-	if (from + len > mtd->size)
-		return -EINVAL;
-
 	memcpy(buf, mp->uaddr + from, len);
 	*retlen = len;
-
 	return 0;
 }
 
@@ -73,12 +69,8 @@
 {
 	struct ms02nv_private *mp = mtd->priv;
 
-	if (to + len > mtd->size)
-		return -EINVAL;
-
 	memcpy(mp->uaddr + to, buf, len);
 	*retlen = len;
-
 	return 0;
 }
 
@@ -215,8 +207,8 @@
 	mtd->size = fixsize;
 	mtd->name = (char *)ms02nv_name;
 	mtd->owner = THIS_MODULE;
-	mtd->read = ms02nv_read;
-	mtd->write = ms02nv_write;
+	mtd->_read = ms02nv_read;
+	mtd->_write = ms02nv_write;
 	mtd->writesize = 1;
 
 	ret = -EIO;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 236057e..928fb0e 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -164,9 +164,6 @@
 	      dev_name(&spi->dev), (long long)instr->addr,
 	      (long long)instr->len);
 
-	/* Sanity checks */
-	if (instr->addr + instr->len > mtd->size)
-		return -EINVAL;
 	div_u64_rem(instr->len, priv->page_size, &rem);
 	if (rem)
 		return -EINVAL;
@@ -252,14 +249,6 @@
 	pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
 			(unsigned)from, (unsigned)(from + len));
 
-	*retlen = 0;
-
-	/* Sanity checks */
-	if (!len)
-		return 0;
-	if (from + len > mtd->size)
-		return -EINVAL;
-
 	/* Calculate flash page/byte address */
 	addr = (((unsigned)from / priv->page_size) << priv->page_offset)
 		+ ((unsigned)from % priv->page_size);
@@ -328,14 +317,6 @@
 	pr_debug("%s: write 0x%x..0x%x\n",
 		dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
 
-	*retlen = 0;
-
-	/* Sanity checks */
-	if (!len)
-		return 0;
-	if ((to + len) > mtd->size)
-		return -EINVAL;
-
 	spi_message_init(&msg);
 
 	x[0].tx_buf = command = priv->command;
@@ -490,8 +471,6 @@
 
 	if ((off + len) > 64)
 		len = 64 - off;
-	if (len == 0)
-		return len;
 
 	spi_message_init(&m);
 
@@ -611,16 +590,16 @@
 
 static char *otp_setup(struct mtd_info *device, char revision)
 {
-	device->get_fact_prot_info = dataflash_get_otp_info;
-	device->read_fact_prot_reg = dataflash_read_fact_otp;
-	device->get_user_prot_info = dataflash_get_otp_info;
-	device->read_user_prot_reg = dataflash_read_user_otp;
+	device->_get_fact_prot_info = dataflash_get_otp_info;
+	device->_read_fact_prot_reg = dataflash_read_fact_otp;
+	device->_get_user_prot_info = dataflash_get_otp_info;
+	device->_read_user_prot_reg = dataflash_read_user_otp;
 
 	/* rev c parts (at45db321c and at45db1281 only!) use a
 	 * different write procedure; not (yet?) implemented.
 	 */
 	if (revision > 'c')
-		device->write_user_prot_reg = dataflash_write_user_otp;
+		device->_write_user_prot_reg = dataflash_write_user_otp;
 
 	return ", OTP";
 }
@@ -672,9 +651,9 @@
 	device->owner = THIS_MODULE;
 	device->type = MTD_DATAFLASH;
 	device->flags = MTD_WRITEABLE;
-	device->erase = dataflash_erase;
-	device->read = dataflash_read;
-	device->write = dataflash_write;
+	device->_erase = dataflash_erase;
+	device->_read = dataflash_read;
+	device->_write = dataflash_write;
 	device->priv = priv;
 
 	device->dev.parent = &spi->dev;
@@ -946,18 +925,7 @@
 	/* FIXME:  investigate suspend and resume... */
 };
 
-static int __init dataflash_init(void)
-{
-	return spi_register_driver(&dataflash_driver);
-}
-module_init(dataflash_init);
-
-static void __exit dataflash_exit(void)
-{
-	spi_unregister_driver(&dataflash_driver);
-}
-module_exit(dataflash_exit);
-
+module_spi_driver(dataflash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Andrew Victor, David Brownell");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 2562689..ec59d65 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -34,34 +34,23 @@
 
 static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
-	if (instr->addr + instr->len > mtd->size)
-		return -EINVAL;
-
 	memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
-
 	instr->state = MTD_ERASE_DONE;
 	mtd_erase_callback(instr);
-
 	return 0;
 }
 
 static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
 		size_t *retlen, void **virt, resource_size_t *phys)
 {
-	if (from + len > mtd->size)
-		return -EINVAL;
-
-	/* can we return a physical address with this driver? */
-	if (phys)
-		return -EINVAL;
-
 	*virt = mtd->priv + from;
 	*retlen = len;
 	return 0;
 }
 
-static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
+	return 0;
 }
 
 /*
@@ -80,11 +69,7 @@
 static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
 		size_t *retlen, u_char *buf)
 {
-	if (from + len > mtd->size)
-		return -EINVAL;
-
 	memcpy(buf, mtd->priv + from, len);
-
 	*retlen = len;
 	return 0;
 }
@@ -92,11 +77,7 @@
 static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
 		size_t *retlen, const u_char *buf)
 {
-	if (to + len > mtd->size)
-		return -EINVAL;
-
 	memcpy((char *)mtd->priv + to, buf, len);
-
 	*retlen = len;
 	return 0;
 }
@@ -126,12 +107,12 @@
 	mtd->priv = mapped_address;
 
 	mtd->owner = THIS_MODULE;
-	mtd->erase = ram_erase;
-	mtd->point = ram_point;
-	mtd->unpoint = ram_unpoint;
-	mtd->get_unmapped_area = ram_get_unmapped_area;
-	mtd->read = ram_read;
-	mtd->write = ram_write;
+	mtd->_erase = ram_erase;
+	mtd->_point = ram_point;
+	mtd->_unpoint = ram_unpoint;
+	mtd->_get_unmapped_area = ram_get_unmapped_area;
+	mtd->_read = ram_read;
+	mtd->_write = ram_write;
 
 	if (mtd_device_register(mtd, NULL, 0))
 		return -EIO;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 23423bd..67823de 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -33,45 +33,33 @@
 
 static LIST_HEAD(phram_list);
 
-
 static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	u_char *start = mtd->priv;
 
-	if (instr->addr + instr->len > mtd->size)
-		return -EINVAL;
-
 	memset(start + instr->addr, 0xff, instr->len);
 
-	/* This'll catch a few races. Free the thing before returning :)
+	/*
+	 * This'll catch a few races. Free the thing before returning :)
 	 * I don't feel at all ashamed. This kind of thing is possible anyway
 	 * with flash, but unlikely.
 	 */
-
 	instr->state = MTD_ERASE_DONE;
-
 	mtd_erase_callback(instr);
-
 	return 0;
 }
 
 static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
 		size_t *retlen, void **virt, resource_size_t *phys)
 {
-	if (from + len > mtd->size)
-		return -EINVAL;
-
-	/* can we return a physical address with this driver? */
-	if (phys)
-		return -EINVAL;
-
 	*virt = mtd->priv + from;
 	*retlen = len;
 	return 0;
 }
 
-static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
+	return 0;
 }
 
 static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -79,14 +67,7 @@
 {
 	u_char *start = mtd->priv;
 
-	if (from >= mtd->size)
-		return -EINVAL;
-
-	if (len > mtd->size - from)
-		len = mtd->size - from;
-
 	memcpy(buf, start + from, len);
-
 	*retlen = len;
 	return 0;
 }
@@ -96,20 +77,11 @@
 {
 	u_char *start = mtd->priv;
 
-	if (to >= mtd->size)
-		return -EINVAL;
-
-	if (len > mtd->size - to)
-		len = mtd->size - to;
-
 	memcpy(start + to, buf, len);
-
 	*retlen = len;
 	return 0;
 }
 
-
-
 static void unregister_devices(void)
 {
 	struct phram_mtd_list *this, *safe;
@@ -142,11 +114,11 @@
 	new->mtd.name = name;
 	new->mtd.size = len;
 	new->mtd.flags = MTD_CAP_RAM;
-        new->mtd.erase = phram_erase;
-	new->mtd.point = phram_point;
-	new->mtd.unpoint = phram_unpoint;
-	new->mtd.read = phram_read;
-	new->mtd.write = phram_write;
+	new->mtd._erase = phram_erase;
+	new->mtd._point = phram_point;
+	new->mtd._unpoint = phram_unpoint;
+	new->mtd._read = phram_read;
+	new->mtd._write = phram_write;
 	new->mtd.owner = THIS_MODULE;
 	new->mtd.type = MTD_RAM;
 	new->mtd.erasesize = PAGE_SIZE;
@@ -233,7 +205,17 @@
 	return 1;		\
 } while (0)
 
-static int phram_setup(const char *val, struct kernel_param *kp)
+/*
+ * This shall contain the module parameter if any. It is of the form:
+ * - phram=<device>,<address>,<size> for module case
+ * - phram.phram=<device>,<address>,<size> for built-in case
+ * We leave 64 bytes for the device name, 12 for the address and 12 for the
+ * size.
+ * Example: phram.phram=rootfs,0xa0000000,512Mi
+ */
+static __initdata char phram_paramline[64+12+12];
+
+static int __init phram_setup(const char *val)
 {
 	char buf[64+12+12], *str = buf;
 	char *token[3];
@@ -282,12 +264,28 @@
 	return ret;
 }
 
-module_param_call(phram, phram_setup, NULL, NULL, 000);
+static int __init phram_param_call(const char *val, struct kernel_param *kp)
+{
+	/*
+	 * This function is always called before 'init_phram()', whether
+	 * built-in or module.
+	 */
+	if (strlen(val) >= sizeof(phram_paramline))
+		return -ENOSPC;
+	strcpy(phram_paramline, val);
+
+	return 0;
+}
+
+module_param_call(phram, phram_param_call, NULL, NULL, 000);
 MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
 
 
 static int __init init_phram(void)
 {
+	if (phram_paramline[0])
+		return phram_setup(phram_paramline);
+
 	return 0;
 }
 
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 5d53c57..0c51b98 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -94,12 +94,48 @@
 #include <linux/ioctl.h>
 #include <asm/io.h>
 #include <linux/pci.h>
-
 #include <linux/mtd/mtd.h>
-#include <linux/mtd/pmc551.h>
+
+#define PMC551_VERSION \
+	"Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
+
+#define PCI_VENDOR_ID_V3_SEMI 0x11b0
+#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
+
+#define PMC551_PCI_MEM_MAP0 0x50
+#define PMC551_PCI_MEM_MAP1 0x54
+#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
+#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
+#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
+#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
+
+#define PMC551_SDRAM_MA  0x60
+#define PMC551_SDRAM_CMD 0x62
+#define PMC551_DRAM_CFG  0x64
+#define PMC551_SYS_CTRL_REG 0x78
+
+#define PMC551_DRAM_BLK0 0x68
+#define PMC551_DRAM_BLK1 0x6c
+#define PMC551_DRAM_BLK2 0x70
+#define PMC551_DRAM_BLK3 0x74
+#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
+#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
+#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
+
+struct mypriv {
+	struct pci_dev *dev;
+	u_char *start;
+	u32 base_map0;
+	u32 curr_map0;
+	u32 asize;
+	struct mtd_info *nextpmc551;
+};
 
 static struct mtd_info *pmc551list;
 
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+			size_t *retlen, void **virt, resource_size_t *phys);
+
 static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	struct mypriv *priv = mtd->priv;
@@ -115,16 +151,6 @@
 #endif
 
 	end = instr->addr + instr->len - 1;
-
-	/* Is it past the end? */
-	if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
-		printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
-			(long)end, (long)mtd->size);
-#endif
-		return -EINVAL;
-	}
-
 	eoff_hi = end & ~(priv->asize - 1);
 	soff_hi = instr->addr & ~(priv->asize - 1);
 	eoff_lo = end & (priv->asize - 1);
@@ -178,18 +204,6 @@
 	printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
 #endif
 
-	if (from + len > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
-		printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
-			(long)from + len, (long)mtd->size);
-#endif
-		return -EINVAL;
-	}
-
-	/* can we return a physical address with this driver? */
-	if (phys)
-		return -EINVAL;
-
 	soff_hi = from & ~(priv->asize - 1);
 	soff_lo = from & (priv->asize - 1);
 
@@ -205,11 +219,12 @@
 	return 0;
 }
 
-static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
 #ifdef CONFIG_MTD_PMC551_DEBUG
 	printk(KERN_DEBUG "pmc551_unpoint()\n");
 #endif
+	return 0;
 }
 
 static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -228,16 +243,6 @@
 #endif
 
 	end = from + len - 1;
-
-	/* Is it past the end? */
-	if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
-		printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
-			(long)end, (long)mtd->size);
-#endif
-		return -EINVAL;
-	}
-
 	soff_hi = from & ~(priv->asize - 1);
 	eoff_hi = end & ~(priv->asize - 1);
 	soff_lo = from & (priv->asize - 1);
@@ -295,16 +300,6 @@
 #endif
 
 	end = to + len - 1;
-	/* Is it past the end?  or did the u32 wrap? */
-	if (end > mtd->size) {
-#ifdef CONFIG_MTD_PMC551_DEBUG
-		printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
-			"size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
-			(long)to);
-#endif
-		return -EINVAL;
-	}
-
 	soff_hi = to & ~(priv->asize - 1);
 	eoff_hi = end & ~(priv->asize - 1);
 	soff_lo = to & (priv->asize - 1);
@@ -358,7 +353,7 @@
  * mechanism
  * returns the size of the memory region found.
  */
-static u32 fixup_pmc551(struct pci_dev *dev)
+static int fixup_pmc551(struct pci_dev *dev)
 {
 #ifdef CONFIG_MTD_PMC551_BUGFIX
 	u32 dram_data;
@@ -668,7 +663,7 @@
 	struct mypriv *priv;
 	int found = 0;
 	struct mtd_info *mtd;
-	u32 length = 0;
+	int length = 0;
 
 	if (msize) {
 		msize = (1 << (ffs(msize) - 1)) << 20;
@@ -786,11 +781,11 @@
 
 		mtd->size = msize;
 		mtd->flags = MTD_CAP_RAM;
-		mtd->erase = pmc551_erase;
-		mtd->read = pmc551_read;
-		mtd->write = pmc551_write;
-		mtd->point = pmc551_point;
-		mtd->unpoint = pmc551_unpoint;
+		mtd->_erase = pmc551_erase;
+		mtd->_read = pmc551_read;
+		mtd->_write = pmc551_write;
+		mtd->_point = pmc551_point;
+		mtd->_unpoint = pmc551_unpoint;
 		mtd->type = MTD_RAM;
 		mtd->name = "PMC551 RAM board";
 		mtd->erasesize = 0x10000;
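
The register definitions moved into pmc551.c above include the aperture-size decode macro PMC551_DRAM_BLK_GET_SIZE(). A worked example of the arithmetic it performs, with a hypothetical register value chosen only for illustration:

/* Bits [7:4] of a DRAM block register select a power-of-two multiple of
 * 512 KiB (524288 bytes). With a hypothetical value of 0x30 the size
 * field is (0x30 >> 4) & 0x0f = 3, giving 524288 << 3 = 4 MiB. */
static u32 sketch_decode_aperture(void)
{
	u32 blk = 0x30;				/* hypothetical register value */

	return PMC551_DRAM_BLK_GET_SIZE(blk);	/* 4194304 bytes */
}
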
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 2885941..8f52fc8 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -75,7 +75,7 @@
 static int slram_erase(struct mtd_info *, struct erase_info *);
 static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
 		resource_size_t *);
-static void slram_unpoint(struct mtd_info *, loff_t, size_t);
+static int slram_unpoint(struct mtd_info *, loff_t, size_t);
 static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
 
@@ -83,21 +83,13 @@
 {
 	slram_priv_t *priv = mtd->priv;
 
-	if (instr->addr + instr->len > mtd->size) {
-		return(-EINVAL);
-	}
-
 	memset(priv->start + instr->addr, 0xff, instr->len);
-
 	/* This'll catch a few races. Free the thing before returning :)
 	 * I don't feel at all ashamed. This kind of thing is possible anyway
 	 * with flash, but unlikely.
 	 */
-
 	instr->state = MTD_ERASE_DONE;
-
 	mtd_erase_callback(instr);
-
 	return(0);
 }
 
@@ -106,20 +98,14 @@
 {
 	slram_priv_t *priv = mtd->priv;
 
-	/* can we return a physical address with this driver? */
-	if (phys)
-		return -EINVAL;
-
-	if (from + len > mtd->size)
-		return -EINVAL;
-
 	*virt = priv->start + from;
 	*retlen = len;
 	return(0);
 }
 
-static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
+	return 0;
 }
 
 static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -127,14 +113,7 @@
 {
 	slram_priv_t *priv = mtd->priv;
 
-	if (from > mtd->size)
-		return -EINVAL;
-
-	if (from + len > mtd->size)
-		len = mtd->size - from;
-
 	memcpy(buf, priv->start + from, len);
-
 	*retlen = len;
 	return(0);
 }
@@ -144,11 +123,7 @@
 {
 	slram_priv_t *priv = mtd->priv;
 
-	if (to + len > mtd->size)
-		return -EINVAL;
-
 	memcpy(priv->start + to, buf, len);
-
 	*retlen = len;
 	return(0);
 }
@@ -199,11 +174,11 @@
 	(*curmtd)->mtdinfo->name = name;
 	(*curmtd)->mtdinfo->size = length;
 	(*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
-        (*curmtd)->mtdinfo->erase = slram_erase;
-	(*curmtd)->mtdinfo->point = slram_point;
-	(*curmtd)->mtdinfo->unpoint = slram_unpoint;
-	(*curmtd)->mtdinfo->read = slram_read;
-	(*curmtd)->mtdinfo->write = slram_write;
+	(*curmtd)->mtdinfo->_erase = slram_erase;
+	(*curmtd)->mtdinfo->_point = slram_point;
+	(*curmtd)->mtdinfo->_unpoint = slram_unpoint;
+	(*curmtd)->mtdinfo->_read = slram_read;
+	(*curmtd)->mtdinfo->_write = slram_write;
 	(*curmtd)->mtdinfo->owner = THIS_MODULE;
 	(*curmtd)->mtdinfo->type = MTD_RAM;
 	(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
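
The pmc551 and slram hunks above illustrate the convention this series moves every device driver to: handlers are assigned to the underscore-prefixed fields of struct mtd_info (the MTD core now performs the bounds and writability checks the drivers used to duplicate), and unpoint() returns int instead of void. Below is a minimal sketch of a RAM-backed driver under that convention; the demo_* names and the backing buffer are illustrative only and not part of this series.

#include <linux/mtd/mtd.h>
#include <linux/string.h>

struct demo_priv {
	void *start;			/* hypothetical backing buffer */
};

static int demo_read(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char *buf)
{
	struct demo_priv *priv = mtd->priv;

	/* no range check here: mtd_read() in the core validates from/len */
	memcpy(buf, priv->start + from, len);
	*retlen = len;
	return 0;
}

static int demo_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	return 0;			/* unpoint now reports a status */
}

static void demo_setup_ops(struct mtd_info *mtd)
{
	mtd->_read = demo_read;		/* was mtd->read */
	mtd->_unpoint = demo_unpoint;	/* was mtd->unpoint, returning void */
}
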
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
new file mode 100644
index 0000000..797d43c
--- /dev/null
+++ b/drivers/mtd/devices/spear_smi.c
@@ -0,0 +1,1147 @@
+/*
+ * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
+ * SPEAr platform
+ * The serial nor interface is largely based on drivers/mtd/m25p80.c,
+ * however the SPI interface has been replaced by SMI.
+ *
+ * Copyright © 2010 STMicroelectronics.
+ * Ashish Priyadarshi
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spear_smi.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* SMI clock rate */
+#define SMI_MAX_CLOCK_FREQ	50000000 /* 50 MHz */
+
+/* max timeout to safely come out of an erase or write busy condition */
+#define SMI_PROBE_TIMEOUT	(HZ / 10)
+#define SMI_MAX_TIME_OUT	(3 * HZ)
+
+/* timeout for command completion */
+#define SMI_CMD_TIMEOUT		(HZ / 10)
+
+/* registers of smi */
+#define SMI_CR1		0x0	/* SMI control register 1 */
+#define SMI_CR2		0x4	/* SMI control register 2 */
+#define SMI_SR		0x8	/* SMI status register */
+#define SMI_TR		0xC	/* SMI transmit register */
+#define SMI_RR		0x10	/* SMI receive register */
+
+/* defines for control_reg 1 */
+#define BANK_EN		(0xF << 0)	/* enables all banks */
+#define DSEL_TIME	(0x6 << 4)	/* Deselect time 6 + 1 SMI_CK periods */
+#define SW_MODE		(0x1 << 28)	/* enables SW Mode */
+#define WB_MODE		(0x1 << 29)	/* Write Burst Mode */
+#define FAST_MODE	(0x1 << 15)	/* Fast Mode */
+#define HOLD1		(0x1 << 16)	/* Clock Hold period selection */
+
+/* defines for control_reg 2 */
+#define SEND		(0x1 << 7)	/* Send data */
+#define TFIE		(0x1 << 8)	/* Transmission Flag Interrupt Enable */
+#define WCIE		(0x1 << 9)	/* Write Complete Interrupt Enable */
+#define RD_STATUS_REG	(0x1 << 10)	/* reads status reg */
+#define WE		(0x1 << 11)	/* Write Enable */
+
+#define TX_LEN_SHIFT	0
+#define RX_LEN_SHIFT	4
+#define BANK_SHIFT	12
+
+/* defines for status register */
+#define SR_WIP		0x1	/* Write in progress */
+#define SR_WEL		0x2	/* Write enable latch */
+#define SR_BP0		0x4	/* Block protect 0 */
+#define SR_BP1		0x8	/* Block protect 1 */
+#define SR_BP2		0x10	/* Block protect 2 */
+#define SR_SRWD		0x80	/* SR write protect */
+#define TFF		0x100	/* Transfer Finished Flag */
+#define WCF		0x200	/* Write Completed Flag */
+#define ERF1		0x400	/* Forbidden Write Request */
+#define ERF2		0x800	/* Forbidden Access */
+
+#define WM_SHIFT	12
+
+/* flash opcodes */
+#define OPCODE_RDID	0x9f	/* Read JEDEC ID */
+
+/* Flash Device Ids maintenance section */
+
+/* data structure to maintain flash ids from different vendors */
+struct flash_device {
+	char *name;
+	u8 erase_cmd;
+	u32 device_id;
+	u32 pagesize;
+	unsigned long sectorsize;
+	unsigned long size_in_bytes;
+};
+
+#define FLASH_ID(n, es, id, psize, ssize, size)	\
+{				\
+	.name = n,		\
+	.erase_cmd = es,	\
+	.device_id = id,	\
+	.pagesize = psize,	\
+	.sectorsize = ssize,	\
+	.size_in_bytes = size	\
+}
+
+static struct flash_device flash_devices[] = {
+	FLASH_ID("st m25p16"     , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
+	FLASH_ID("st m25p32"     , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
+	FLASH_ID("st m25p64"     , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
+	FLASH_ID("st m25p128"    , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
+	FLASH_ID("st m25p05"     , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
+	FLASH_ID("st m25p10"     , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
+	FLASH_ID("st m25p20"     , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
+	FLASH_ID("st m25p40"     , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
+	FLASH_ID("st m25p80"     , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
+	FLASH_ID("st m45pe10"    , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
+	FLASH_ID("st m45pe20"    , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
+	FLASH_ID("st m45pe40"    , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
+	FLASH_ID("st m45pe80"    , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
+	FLASH_ID("sp s25fl004"   , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
+	FLASH_ID("sp s25fl008"   , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
+	FLASH_ID("sp s25fl016"   , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
+	FLASH_ID("sp s25fl032"   , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
+	FLASH_ID("sp s25fl064"   , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
+	FLASH_ID("atmel 25f512"  , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
+	FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
+	FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
+	FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
+	FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
+	FLASH_ID("mac 25l512"    , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
+	FLASH_ID("mac 25l1005"   , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
+	FLASH_ID("mac 25l2005"   , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
+	FLASH_ID("mac 25l4005"   , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+	FLASH_ID("mac 25l4005a"  , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+	FLASH_ID("mac 25l8005"   , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
+	FLASH_ID("mac 25l1605"   , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
+	FLASH_ID("mac 25l1605a"  , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
+	FLASH_ID("mac 25l3205"   , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+	FLASH_ID("mac 25l3205a"  , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+	FLASH_ID("mac 25l6405"   , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
+};
+
+/* Define spear specific structures */
+
+struct spear_snor_flash;
+
+/**
+ * struct spear_smi - Structure for SMI Device
+ *
+ * @clk: functional clock
+ * @status: current status register of SMI.
+ * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
+ * @lock: lock to prevent parallel access of SMI.
+ * @io_base: base address for registers of SMI.
+ * @pdev: platform device
+ * @cmd_complete: queue to wait for command completion of NOR-flash.
+ * @num_flashes: number of flashes actually present on board.
+ * @flash: separate structure for each Serial NOR-flash attached to SMI.
+ */
+struct spear_smi {
+	struct clk *clk;
+	u32 status;
+	unsigned long clk_rate;
+	struct mutex lock;
+	void __iomem *io_base;
+	struct platform_device *pdev;
+	wait_queue_head_t cmd_complete;
+	u32 num_flashes;
+	struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
+};
+
+/**
+ * struct spear_snor_flash - Structure for Serial NOR Flash
+ *
+ * @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
+ * @dev_id: Device ID of NOR-flash.
+ * @lock: lock to manage flash read, write and erase operations
+ * @mtd: MTD info for each NOR-flash.
+ * @num_parts: Total number of partition in each bank of NOR-flash.
+ * @parts: Partition info for each bank of NOR-flash.
+ * @page_size: Page size of NOR-flash.
+ * @base_addr: Base address of NOR-flash.
+ * @erase_cmd: erase command may vary on different flash types
+ * @fast_mode: flash supports read in fast mode
+ */
+struct spear_snor_flash {
+	u32 bank;
+	u32 dev_id;
+	struct mutex lock;
+	struct mtd_info mtd;
+	u32 num_parts;
+	struct mtd_partition *parts;
+	u32 page_size;
+	void __iomem *base_addr;
+	u8 erase_cmd;
+	u8 fast_mode;
+};
+
+static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
+{
+	return container_of(mtd, struct spear_snor_flash, mtd);
+}
+
+/**
+ * spear_smi_read_sr - Read status register of flash through SMI
+ * @dev: structure of SMI information.
+ * @bank: bank to which flash is connected
+ *
+ * This routine will return the status register of the flash chip present at the
+ * given bank.
+ */
+static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
+{
+	int ret;
+	u32 ctrlreg1;
+
+	mutex_lock(&dev->lock);
+	dev->status = 0; /* Will be set in interrupt handler */
+
+	ctrlreg1 = readl(dev->io_base + SMI_CR1);
+	/* program smi in hw mode */
+	writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
+
+	/* performing a rsr instruction in hw mode */
+	writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
+			dev->io_base + SMI_CR2);
+
+	/* wait for tff */
+	ret = wait_event_interruptible_timeout(dev->cmd_complete,
+			dev->status & TFF, SMI_CMD_TIMEOUT);
+
+	/* copy dev->status (lower 16 bits) in order to release lock */
+	if (ret > 0)
+		ret = dev->status & 0xffff;
+	else
+		ret = -EIO;
+
+	/* restore the ctrl regs state */
+	writel(ctrlreg1, dev->io_base + SMI_CR1);
+	writel(0, dev->io_base + SMI_CR2);
+	mutex_unlock(&dev->lock);
+
+	return ret;
+}
+
+/**
+ * spear_smi_wait_till_ready - wait till flash is ready
+ * @dev: structure of SMI information.
+ * @bank: flash corresponding to this bank
+ * @timeout: timeout for busy wait condition
+ *
+ * This routine checks the WIP (write in progress) bit in the status register.
+ * If successful, the routine returns 0, else -EBUSY.
+ */
+static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
+		unsigned long timeout)
+{
+	unsigned long finish;
+	int status;
+
+	finish = jiffies + timeout;
+	do {
+		status = spear_smi_read_sr(dev, bank);
+		if (status < 0)
+			continue; /* try till timeout */
+		else if (!(status & SR_WIP))
+			return 0;
+
+		cond_resched();
+	} while (!time_after_eq(jiffies, finish));
+
+	dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
+	return status;
+}
+
+/**
+ * spear_smi_int_handler - SMI Interrupt Handler.
+ * @irq: irq number
+ * @dev_id: structure of SMI device, embedded in dev_id.
+ *
+ * The handler clears all interrupt conditions and records the status in
+ * dev->status which is used by the driver later.
+ */
+static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
+{
+	u32 status = 0;
+	struct spear_smi *dev = dev_id;
+
+	status = readl(dev->io_base + SMI_SR);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	/* clear all interrupt conditions */
+	writel(0, dev->io_base + SMI_SR);
+
+	/* copy the status register in dev->status */
+	dev->status |= status;
+
+	/* send the completion */
+	wake_up_interruptible(&dev->cmd_complete);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * spear_smi_hw_init - initializes the smi controller.
+ * @dev: structure of smi device
+ *
+ * this routine initializes the smi controller with the default values
+ */
+static void spear_smi_hw_init(struct spear_smi *dev)
+{
+	unsigned long rate = 0;
+	u32 prescale = 0;
+	u32 val;
+
+	rate = clk_get_rate(dev->clk);
+
+	/* functional clock of smi */
+	prescale = DIV_ROUND_UP(rate, dev->clk_rate);
+
+	/*
+	 * setting the standard values, fast mode, prescaler for
+	 * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
+	 */
+	val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
+
+	mutex_lock(&dev->lock);
+	writel(val, dev->io_base + SMI_CR1);
+	mutex_unlock(&dev->lock);
+}
+
+/**
+ * get_flash_index - match chip id from a flash list.
+ * @flash_id: a valid nor flash chip id obtained from board.
+ *
+ * Try to validate the chip id by matching it against a list; if it is not
+ * found, return a negative value. On success, return the index into the
+ * flash devices array.
+ */
+static int get_flash_index(u32 flash_id)
+{
+	int index;
+
+	/* Matches chip-id to entire list of 'serial-nor flash' ids */
+	for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
+		if (flash_devices[index].device_id == flash_id)
+			return index;
+	}
+
+	/* Memory chip is not listed and not supported */
+	return -ENODEV;
+}
+
+/**
+ * spear_smi_write_enable - Enable the flash to do write operation
+ * @dev: structure of SMI device
+ * @bank: enable write for flash connected to this bank
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns 0 on success.
+ */
+static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
+{
+	int ret;
+	u32 ctrlreg1;
+
+	mutex_lock(&dev->lock);
+	dev->status = 0; /* Will be set in interrupt handler */
+
+	ctrlreg1 = readl(dev->io_base + SMI_CR1);
+	/* program smi in h/w mode */
+	writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
+
+	/* give the flash, write enable command */
+	writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
+
+	ret = wait_event_interruptible_timeout(dev->cmd_complete,
+			dev->status & TFF, SMI_CMD_TIMEOUT);
+
+	/* restore the ctrl regs state */
+	writel(ctrlreg1, dev->io_base + SMI_CR1);
+	writel(0, dev->io_base + SMI_CR2);
+
+	if (ret <= 0) {
+		ret = -EIO;
+		dev_err(&dev->pdev->dev,
+			"smi controller failed on write enable\n");
+	} else {
+		/* check whether write mode status is set for required bank */
+		if (dev->status & (1 << (bank + WM_SHIFT)))
+			ret = 0;
+		else {
+			dev_err(&dev->pdev->dev, "couldn't enable write\n");
+			ret = -EIO;
+		}
+	}
+
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+static inline u32
+get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
+{
+	u32 cmd;
+	u8 *x = (u8 *)&cmd;
+
+	x[0] = flash->erase_cmd;
+	x[1] = offset >> 16;
+	x[2] = offset >> 8;
+	x[3] = offset;
+
+	return cmd;
+}
+
+/**
+ * spear_smi_erase_sector - erase one sector of flash
+ * @dev: structure of SMI information
+ * @command: erase command to be sent
+ * @bank: bank to which this command needs to be sent
+ * @bytes: size of command
+ *
+ * Erase one sector of flash memory at offset ``offset'', which may be any
+ * address within the sector to be erased.
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int spear_smi_erase_sector(struct spear_smi *dev,
+		u32 bank, u32 command, u32 bytes)
+{
+	u32 ctrlreg1 = 0;
+	int ret;
+
+	ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+	if (ret)
+		return ret;
+
+	ret = spear_smi_write_enable(dev, bank);
+	if (ret)
+		return ret;
+
+	mutex_lock(&dev->lock);
+
+	ctrlreg1 = readl(dev->io_base + SMI_CR1);
+	writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
+
+	/* send command in sw mode */
+	writel(command, dev->io_base + SMI_TR);
+
+	writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
+			dev->io_base + SMI_CR2);
+
+	ret = wait_event_interruptible_timeout(dev->cmd_complete,
+			dev->status & TFF, SMI_CMD_TIMEOUT);
+
+	if (ret <= 0) {
+		ret = -EIO;
+		dev_err(&dev->pdev->dev, "sector erase failed\n");
+	} else
+		ret = 0; /* success */
+
+	/* restore ctrl regs */
+	writel(ctrlreg1, dev->io_base + SMI_CR1);
+	writel(0, dev->io_base + SMI_CR2);
+
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+/**
+ * spear_mtd_erase - perform flash erase operation as requested by user
+ * @mtd: Provides the memory characteristics
+ * @e_info: Provides the erase information
+ *
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
+{
+	struct spear_snor_flash *flash = get_flash_data(mtd);
+	struct spear_smi *dev = mtd->priv;
+	u32 addr, command, bank;
+	int len, ret;
+
+	if (!flash || !dev)
+		return -ENODEV;
+
+	bank = flash->bank;
+	if (bank > dev->num_flashes - 1) {
+		dev_err(&dev->pdev->dev, "Invalid Bank Num");
+		return -EINVAL;
+	}
+
+	addr = e_info->addr;
+	len = e_info->len;
+
+	mutex_lock(&flash->lock);
+
+	/* now erase sectors in loop */
+	while (len) {
+		command = get_sector_erase_cmd(flash, addr);
+		/* preparing the command for flash */
+		ret = spear_smi_erase_sector(dev, bank, command, 4);
+		if (ret) {
+			e_info->state = MTD_ERASE_FAILED;
+			mutex_unlock(&flash->lock);
+			return ret;
+		}
+		addr += mtd->erasesize;
+		len -= mtd->erasesize;
+	}
+
+	mutex_unlock(&flash->lock);
+	e_info->state = MTD_ERASE_DONE;
+	mtd_erase_callback(e_info);
+
+	return 0;
+}
+
+/**
+ * spear_mtd_read - performs flash read operation as requested by the user
+ * @mtd: MTD information of the memory bank
+ * @from: Address from which to start read
+ * @len: Number of bytes to be read
+ * @retlen: Fills the Number of bytes actually read
+ * @buf: Fills this after reading
+ *
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+		size_t *retlen, u8 *buf)
+{
+	struct spear_snor_flash *flash = get_flash_data(mtd);
+	struct spear_smi *dev = mtd->priv;
+	void *src;
+	u32 ctrlreg1, val;
+	int ret;
+
+	if (!flash || !dev)
+		return -ENODEV;
+
+	if (flash->bank > dev->num_flashes - 1) {
+		dev_err(&dev->pdev->dev, "Invalid Bank Num");
+		return -EINVAL;
+	}
+
+	/* select address as per bank number */
+	src = flash->base_addr + from;
+
+	mutex_lock(&flash->lock);
+
+	/* wait till previous write/erase is done. */
+	ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
+	if (ret) {
+		mutex_unlock(&flash->lock);
+		return ret;
+	}
+
+	mutex_lock(&dev->lock);
+	/* put smi in hw mode not wbt mode */
+	ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
+	val &= ~(SW_MODE | WB_MODE);
+	if (flash->fast_mode)
+		val |= FAST_MODE;
+
+	writel(val, dev->io_base + SMI_CR1);
+
+	memcpy_fromio(buf, (u8 *)src, len);
+
+	/* restore ctrl reg1 */
+	writel(ctrlreg1, dev->io_base + SMI_CR1);
+	mutex_unlock(&dev->lock);
+
+	*retlen = len;
+	mutex_unlock(&flash->lock);
+
+	return 0;
+}
+
+static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
+		void *dest, const void *src, size_t len)
+{
+	int ret;
+	u32 ctrlreg1;
+
+	/* wait until finished previous write command. */
+	ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+	if (ret)
+		return ret;
+
+	/* put smi in write enable */
+	ret = spear_smi_write_enable(dev, bank);
+	if (ret)
+		return ret;
+
+	/* put smi in hw, write burst mode */
+	mutex_lock(&dev->lock);
+
+	ctrlreg1 = readl(dev->io_base + SMI_CR1);
+	writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
+
+	memcpy_toio(dest, src, len);
+
+	writel(ctrlreg1, dev->io_base + SMI_CR1);
+
+	mutex_unlock(&dev->lock);
+	return 0;
+}
+
+/**
+ * spear_mtd_write - performs write operation as requested by the user.
+ * @mtd: MTD information of the memory bank.
+ * @to:	Address to write.
+ * @len: Number of bytes to be written.
+ * @retlen: Number of bytes actually written.
+ * @buf: Buffer from which the data to be taken.
+ *
+ * Write an address range to the flash chip. Data must be written in
+ * flash_page_size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+		size_t *retlen, const u8 *buf)
+{
+	struct spear_snor_flash *flash = get_flash_data(mtd);
+	struct spear_smi *dev = mtd->priv;
+	void *dest;
+	u32 page_offset, page_size;
+	int ret;
+
+	if (!flash || !dev)
+		return -ENODEV;
+
+	if (flash->bank > dev->num_flashes - 1) {
+		dev_err(&dev->pdev->dev, "Invalid Bank Num");
+		return -EINVAL;
+	}
+
+	/* select address as per bank number */
+	dest = flash->base_addr + to;
+	mutex_lock(&flash->lock);
+
+	page_offset = (u32)to % flash->page_size;
+
+	/* do if all the bytes fit onto one page */
+	if (page_offset + len <= flash->page_size) {
+		ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
+		if (!ret)
+			*retlen += len;
+	} else {
+		u32 i;
+
+		/* the size of data remaining on the first page */
+		page_size = flash->page_size - page_offset;
+
+		ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
+				page_size);
+		if (ret)
+			goto err_write;
+		else
+			*retlen += page_size;
+
+		/* write everything in pagesize chunks */
+		for (i = page_size; i < len; i += page_size) {
+			page_size = len - i;
+			if (page_size > flash->page_size)
+				page_size = flash->page_size;
+
+			ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
+					buf + i, page_size);
+			if (ret)
+				break;
+			else
+				*retlen += page_size;
+		}
+	}
+
+err_write:
+	mutex_unlock(&flash->lock);
+
+	return ret;
+}
+
+/**
+ * spear_smi_probe_flash - Detects the NOR Flash chip.
+ * @dev: structure of SMI information.
+ * @bank: bank on which flash must be probed
+ *
+ * This routine will check whether there exists a flash chip on a given memory
+ * bank ID.
+ * Returns the index of the probed flash in the flash devices array.
+ */
+static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
+{
+	int ret;
+	u32 val = 0;
+
+	ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
+	if (ret)
+		return ret;
+
+	mutex_lock(&dev->lock);
+
+	dev->status = 0; /* Will be set in interrupt handler */
+	/* put smi in sw mode */
+	val = readl(dev->io_base + SMI_CR1);
+	writel(val | SW_MODE, dev->io_base + SMI_CR1);
+
+	/* send readid command in sw mode */
+	writel(OPCODE_RDID, dev->io_base + SMI_TR);
+
+	val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
+		(3 << RX_LEN_SHIFT) | TFIE;
+	writel(val, dev->io_base + SMI_CR2);
+
+	/* wait for TFF */
+	ret = wait_event_interruptible_timeout(dev->cmd_complete,
+			dev->status & TFF, SMI_CMD_TIMEOUT);
+	if (ret <= 0) {
+		ret = -ENODEV;
+		goto err_probe;
+	}
+
+	/* get memory chip id */
+	val = readl(dev->io_base + SMI_RR);
+	val &= 0x00ffffff;
+	ret = get_flash_index(val);
+
+err_probe:
+	/* clear sw mode */
+	val = readl(dev->io_base + SMI_CR1);
+	writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
+
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+
+#ifdef CONFIG_OF
+static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
+					       struct device_node *np)
+{
+	struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device_node *pp = NULL;
+	const __be32 *addr;
+	u32 val;
+	int len;
+	int i = 0;
+
+	if (!np)
+		return -ENODEV;
+
+	of_property_read_u32(np, "clock-rate", &val);
+	pdata->clk_rate = val;
+
+	pdata->board_flash_info = devm_kzalloc(&pdev->dev,
+					       sizeof(*pdata->board_flash_info),
+					       GFP_KERNEL);
+
+	/* Fill structs for each subnode (flash device) */
+	while ((pp = of_get_next_child(np, pp))) {
+		struct spear_smi_flash_info *flash_info;
+
+		flash_info = &pdata->board_flash_info[i];
+		pdata->np[i] = pp;
+
+		/* Read base-addr and size from DT */
+		addr = of_get_property(pp, "reg", &len);
+		pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
+		pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
+
+		if (of_get_property(pp, "st,smi-fast-mode", NULL))
+			pdata->board_flash_info->fast_mode = 1;
+
+		i++;
+	}
+
+	pdata->num_flashes = i;
+
+	return 0;
+}
+#else
+static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
+					       struct device_node *np)
+{
+	return -ENOSYS;
+}
+#endif
+
+static int spear_smi_setup_banks(struct platform_device *pdev,
+				 u32 bank, struct device_node *np)
+{
+	struct spear_smi *dev = platform_get_drvdata(pdev);
+	struct mtd_part_parser_data ppdata = {};
+	struct spear_smi_flash_info *flash_info;
+	struct spear_smi_plat_data *pdata;
+	struct spear_snor_flash *flash;
+	struct mtd_partition *parts = NULL;
+	int count = 0;
+	int flash_index;
+	int ret = 0;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (bank > pdata->num_flashes - 1)
+		return -EINVAL;
+
+	flash_info = &pdata->board_flash_info[bank];
+	if (!flash_info)
+		return -ENODEV;
+
+	flash = kzalloc(sizeof(*flash), GFP_ATOMIC);
+	if (!flash)
+		return -ENOMEM;
+	flash->bank = bank;
+	flash->fast_mode = flash_info->fast_mode ? 1 : 0;
+	mutex_init(&flash->lock);
+
+	/* verify whether nor flash is really present on board */
+	flash_index = spear_smi_probe_flash(dev, bank);
+	if (flash_index < 0) {
+		dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
+		ret = flash_index;
+		goto err_probe;
+	}
+	/* map the memory for nor flash chip */
+	flash->base_addr = ioremap(flash_info->mem_base, flash_info->size);
+	if (!flash->base_addr) {
+		ret = -EIO;
+		goto err_probe;
+	}
+
+	dev->flash[bank] = flash;
+	flash->mtd.priv = dev;
+
+	if (flash_info->name)
+		flash->mtd.name = flash_info->name;
+	else
+		flash->mtd.name = flash_devices[flash_index].name;
+
+	flash->mtd.type = MTD_NORFLASH;
+	flash->mtd.writesize = 1;
+	flash->mtd.flags = MTD_CAP_NORFLASH;
+	flash->mtd.size = flash_info->size;
+	flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
+	flash->page_size = flash_devices[flash_index].pagesize;
+	flash->mtd.writebufsize = flash->page_size;
+	flash->erase_cmd = flash_devices[flash_index].erase_cmd;
+	flash->mtd._erase = spear_mtd_erase;
+	flash->mtd._read = spear_mtd_read;
+	flash->mtd._write = spear_mtd_write;
+	flash->dev_id = flash_devices[flash_index].device_id;
+
+	dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
+			flash->mtd.name, flash->mtd.size,
+			flash->mtd.size / (1024 * 1024));
+
+	dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
+			flash->mtd.erasesize, flash->mtd.erasesize / 1024);
+
+#ifndef CONFIG_OF
+	if (flash_info->partitions) {
+		parts = flash_info->partitions;
+		count = flash_info->nr_partitions;
+	}
+#endif
+	ppdata.of_node = np;
+
+	ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts,
+					count);
+	if (ret) {
+		dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
+		goto err_map;
+	}
+
+	return 0;
+
+err_map:
+	iounmap(flash->base_addr);
+
+err_probe:
+	kfree(flash);
+	return ret;
+}
+
+/**
+ * spear_smi_probe - Entry routine
+ * @pdev: platform device structure
+ *
+ * This is the first routine which gets invoked during booting and does all
+ * initialization/allocation work. The routine looks for available memory banks,
+ * and does the proper init for each one found.
+ * Returns 0 on success, non zero otherwise
+ */
+static int __devinit spear_smi_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct spear_smi_plat_data *pdata = NULL;
+	struct spear_smi *dev;
+	struct resource *smi_base;
+	int irq, ret = 0;
+	int i;
+
+	if (np) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata) {
+			pr_err("%s: ERROR: no memory", __func__);
+			ret = -ENOMEM;
+			goto err;
+		}
+		pdev->dev.platform_data = pdata;
+		ret = spear_smi_probe_config_dt(pdev, np);
+		if (ret) {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "no platform data\n");
+			goto err;
+		}
+	} else {
+		pdata = dev_get_platdata(&pdev->dev);
+		if (!pdata) {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "no platform data\n");
+			goto err;
+		}
+	}
+
+	smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!smi_base) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "invalid smi base address\n");
+		goto err;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "invalid smi irq\n");
+		goto err;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
+	if (!dev) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "mem alloc fail\n");
+		goto err;
+	}
+
+	smi_base = request_mem_region(smi_base->start, resource_size(smi_base),
+			pdev->name);
+	if (!smi_base) {
+		ret = -EBUSY;
+		dev_err(&pdev->dev, "request mem region fail\n");
+		goto err_mem;
+	}
+
+	dev->io_base = ioremap(smi_base->start, resource_size(smi_base));
+	if (!dev->io_base) {
+		ret = -EIO;
+		dev_err(&pdev->dev, "ioremap fail\n");
+		goto err_ioremap;
+	}
+
+	dev->pdev = pdev;
+	dev->clk_rate = pdata->clk_rate;
+
+	if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ)
+		dev->clk_rate = SMI_MAX_CLOCK_FREQ;
+
+	dev->num_flashes = pdata->num_flashes;
+
+	if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
+		dev_err(&pdev->dev, "exceeding max number of flashes\n");
+		dev->num_flashes = MAX_NUM_FLASH_CHIP;
+	}
+
+	dev->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dev->clk)) {
+		ret = PTR_ERR(dev->clk);
+		goto err_clk;
+	}
+
+	ret = clk_enable(dev->clk);
+	if (ret)
+		goto err_clk_enable;
+
+	ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
+	if (ret) {
+		dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
+		goto err_irq;
+	}
+
+	mutex_init(&dev->lock);
+	init_waitqueue_head(&dev->cmd_complete);
+	spear_smi_hw_init(dev);
+	platform_set_drvdata(pdev, dev);
+
+	/* loop for each serial nor-flash which is connected to smi */
+	for (i = 0; i < dev->num_flashes; i++) {
+		ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
+		if (ret) {
+			dev_err(&dev->pdev->dev, "bank setup failed\n");
+			goto err_bank_setup;
+		}
+	}
+
+	return 0;
+
+err_bank_setup:
+	free_irq(irq, dev);
+	platform_set_drvdata(pdev, NULL);
+err_irq:
+	clk_disable(dev->clk);
+err_clk_enable:
+	clk_put(dev->clk);
+err_clk:
+	iounmap(dev->io_base);
+err_ioremap:
+	release_mem_region(smi_base->start, resource_size(smi_base));
+err_mem:
+	kfree(dev);
+err:
+	return ret;
+}
+
+/**
+ * spear_smi_remove - Exit routine
+ * @pdev: platform device structure
+ *
+ * free all allocations and delete the partitions.
+ */
+static int __devexit spear_smi_remove(struct platform_device *pdev)
+{
+	struct spear_smi *dev;
+	struct spear_smi_plat_data *pdata;
+	struct spear_snor_flash *flash;
+	struct resource *smi_base;
+	int ret;
+	int i, irq;
+
+	dev = platform_get_drvdata(pdev);
+	if (!dev) {
+		dev_err(&pdev->dev, "dev is null\n");
+		return -ENODEV;
+	}
+
+	pdata = dev_get_platdata(&pdev->dev);
+
+	/* clean up for all nor flash */
+	for (i = 0; i < dev->num_flashes; i++) {
+		flash = dev->flash[i];
+		if (!flash)
+			continue;
+
+		/* clean up mtd stuff */
+		ret = mtd_device_unregister(&flash->mtd);
+		if (ret)
+			dev_err(&pdev->dev, "error removing mtd\n");
+
+		iounmap(flash->base_addr);
+		kfree(flash);
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	free_irq(irq, dev);
+
+	clk_disable(dev->clk);
+	clk_put(dev->clk);
+	iounmap(dev->io_base);
+	kfree(dev);
+
+	smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(smi_base->start, resource_size(smi_base));
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct spear_smi *dev = platform_get_drvdata(pdev);
+
+	if (dev && dev->clk)
+		clk_disable(dev->clk);
+
+	return 0;
+}
+
+int spear_smi_resume(struct platform_device *pdev)
+{
+	struct spear_smi *dev = platform_get_drvdata(pdev);
+	int ret = -EPERM;
+
+	if (dev && dev->clk)
+		ret = clk_enable(dev->clk);
+
+	if (!ret)
+		spear_smi_hw_init(dev);
+	return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spear_smi_id_table[] = {
+	{ .compatible = "st,spear600-smi" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, spear_smi_id_table);
+#endif
+
+static struct platform_driver spear_smi_driver = {
+	.driver = {
+		.name = "smi",
+		.bus = &platform_bus_type,
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(spear_smi_id_table),
+	},
+	.probe = spear_smi_probe,
+	.remove = __devexit_p(spear_smi_remove),
+	.suspend = spear_smi_suspend,
+	.resume = spear_smi_resume,
+};
+
+static int spear_smi_init(void)
+{
+	return platform_driver_register(&spear_smi_driver);
+}
+module_init(spear_smi_init);
+
+static void spear_smi_exit(void)
+{
+	platform_driver_unregister(&spear_smi_driver);
+}
+module_exit(spear_smi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
+MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 5fc1983..ab8a2f4 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -175,9 +175,6 @@
 	int err;
 
 	/* Sanity checks */
-	if (instr->addr + instr->len > flash->mtd.size)
-		return -EINVAL;
-
 	if ((uint32_t)instr->len % mtd->erasesize)
 		return -EINVAL;
 
@@ -223,16 +220,6 @@
 	unsigned char command[4];
 	int ret;
 
-	/* Sanity checking */
-	if (len == 0)
-		return 0;
-
-	if (from + len > flash->mtd.size)
-		return -EINVAL;
-
-	if (retlen)
-		*retlen = 0;
-
 	spi_message_init(&message);
 	memset(&transfer, 0, sizeof(transfer));
 
@@ -274,13 +261,6 @@
 	int i, j, ret, bytes, copied = 0;
 	unsigned char command[5];
 
-	/* Sanity checks */
-	if (!len)
-		return 0;
-
-	if (to + len > flash->mtd.size)
-		return -EINVAL;
-
 	if ((uint32_t)to % mtd->writesize)
 		return -EINVAL;
 
@@ -402,10 +382,11 @@
 	flash->mtd.flags	= MTD_CAP_NORFLASH;
 	flash->mtd.erasesize	= flash_info->erase_size;
 	flash->mtd.writesize	= flash_info->page_size;
+	flash->mtd.writebufsize	= flash_info->page_size;
 	flash->mtd.size		= flash_info->page_size * flash_info->nr_pages;
-	flash->mtd.erase	= sst25l_erase;
-	flash->mtd.read		= sst25l_read;
-	flash->mtd.write 	= sst25l_write;
+	flash->mtd._erase	= sst25l_erase;
+	flash->mtd._read		= sst25l_read;
+	flash->mtd._write 	= sst25l_write;
 
 	dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
 		 (long long)flash->mtd.size >> 10);
@@ -418,9 +399,9 @@
 	      flash->mtd.numeraseregions);
 
 
-	ret = mtd_device_parse_register(&flash->mtd, NULL, 0,
-			data ? data->parts : NULL,
-			data ? data->nr_parts : 0);
+	ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
+					data ? data->parts : NULL,
+					data ? data->nr_parts : 0);
 	if (ret) {
 		kfree(flash);
 		dev_set_drvdata(&spi->dev, NULL);
@@ -450,18 +431,7 @@
 	.remove		= __devexit_p(sst25l_remove),
 };
 
-static int __init sst25l_init(void)
-{
-	return spi_register_driver(&sst25l_driver);
-}
-
-static void __exit sst25l_exit(void)
-{
-	spi_unregister_driver(&sst25l_driver);
-}
-
-module_init(sst25l_init);
-module_exit(sst25l_exit);
+module_spi_driver(sst25l_driver);
 
 MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
 MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 28646c9..3af3514 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -56,7 +56,7 @@
 	if (memcmp(mtd->name, "DiskOnChip", 10))
 		return;
 
-	if (!mtd->block_isbad) {
+	if (!mtd->_block_isbad) {
 		printk(KERN_ERR
 "INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
 "Please use the new diskonchip driver under the NAND subsystem.\n");
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 536bbce..d3cfe26b 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -40,7 +40,7 @@
 static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
 			size_t *retlen, void **mtdbuf, resource_size_t *phys);
-static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
+static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
 static int get_chip(struct map_info *map, struct flchip *chip, int mode);
 static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
 static void put_chip(struct map_info *map, struct flchip *chip);
@@ -63,18 +63,18 @@
 	mtd->type = MTD_NORFLASH;
 
 	/* Fill in the default mtd operations */
-	mtd->read = lpddr_read;
+	mtd->_read = lpddr_read;
 	mtd->type = MTD_NORFLASH;
 	mtd->flags = MTD_CAP_NORFLASH;
 	mtd->flags &= ~MTD_BIT_WRITEABLE;
-	mtd->erase = lpddr_erase;
-	mtd->write = lpddr_write_buffers;
-	mtd->writev = lpddr_writev;
-	mtd->lock = lpddr_lock;
-	mtd->unlock = lpddr_unlock;
+	mtd->_erase = lpddr_erase;
+	mtd->_write = lpddr_write_buffers;
+	mtd->_writev = lpddr_writev;
+	mtd->_lock = lpddr_lock;
+	mtd->_unlock = lpddr_unlock;
 	if (map_is_linear(map)) {
-		mtd->point = lpddr_point;
-		mtd->unpoint = lpddr_unpoint;
+		mtd->_point = lpddr_point;
+		mtd->_unpoint = lpddr_unpoint;
 	}
 	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
 	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
@@ -530,14 +530,12 @@
 	struct flchip *chip = &lpddr->chips[chipnum];
 	int ret = 0;
 
-	if (!map->virt || (adr + len > mtd->size))
+	if (!map->virt)
 		return -EINVAL;
 
 	/* ofs: offset within the first chip that the first read should start */
 	ofs = adr - (chipnum << lpddr->chipshift);
-
 	*mtdbuf = (void *)map->virt + chip->start + ofs;
-	*retlen = 0;
 
 	while (len) {
 		unsigned long thislen;
@@ -575,11 +573,11 @@
 	return 0;
 }
 
-static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
+static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
 {
 	struct map_info *map = mtd->priv;
 	struct lpddr_private *lpddr = map->fldrv_priv;
-	int chipnum = adr >> lpddr->chipshift;
+	int chipnum = adr >> lpddr->chipshift, err = 0;
 	unsigned long ofs;
 
 	/* ofs: offset within the first chip that the first read should start */
@@ -603,9 +601,11 @@
 			chip->ref_point_counter--;
 			if (chip->ref_point_counter == 0)
 				chip->state = FL_READY;
-		} else
+		} else {
 			printk(KERN_WARNING "%s: Warning: unpoint called on non"
 					"pointed region\n", map->name);
+			err = -EINVAL;
+		}
 
 		put_chip(map, chip);
 		mutex_unlock(&chip->mutex);
@@ -614,6 +614,8 @@
 		ofs = 0;
 		chipnum++;
 	}
+
+	return err;
 }
 
 static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
@@ -637,13 +639,11 @@
 	int chipnum;
 	unsigned long ofs, vec_seek, i;
 	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
-
 	size_t len = 0;
 
 	for (i = 0; i < count; i++)
 		len += vecs[i].iov_len;
 
-	*retlen = 0;
 	if (!len)
 		return 0;
 
@@ -688,9 +688,6 @@
 	ofs = instr->addr;
 	len = instr->len;
 
-	if (ofs > mtd->size || (len + ofs) > mtd->size)
-		return -EINVAL;
-
 	while (len > 0) {
 		ret = do_erase_oneblock(mtd, ofs);
 		if (ret)
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 650126c..ef5cde8 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -164,8 +164,8 @@
 		return -ENXIO;
 	}
 
-	mtd_device_parse_register(state->mtd, part_probe_types, 0,
-			pdata->parts, pdata->nr_parts);
+	mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+				  pdata->parts, pdata->nr_parts);
 
 	platform_set_drvdata(pdev, state);
 
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index f43b365..080f060 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -196,7 +196,7 @@
 
 	dc21285_mtd->owner = THIS_MODULE;
 
-	mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0);
+	mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
 
 	if(machine_is_ebsa285()) {
 		/*
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 33cce89..e4de96b 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -252,8 +252,8 @@
 	}
 
 
-	mtd_device_parse_register(state->mtd, part_probe_types, 0,
-			pdata->parts, pdata->nr_parts);
+	mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+				  pdata->parts, pdata->nr_parts);
 
 	return 0;
 }
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 49c1418..8ed6cb4 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -85,8 +85,8 @@
 	if (mymtd) {
 		mymtd->owner = THIS_MODULE;
 
-		mtd_device_parse_register(mymtd, NULL, 0,
-				h720x_partitions, NUM_PARTITIONS);
+		mtd_device_parse_register(mymtd, NULL, NULL,
+					  h720x_partitions, NUM_PARTITIONS);
 		return 0;
 	}
 
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index f47aedb..834a06c 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -91,7 +91,7 @@
 		if (impa7_mtd[i]) {
 			impa7_mtd[i]->owner = THIS_MODULE;
 			devicesfound++;
-			mtd_device_parse_register(impa7_mtd[i], NULL, 0,
+			mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
 						  partitions,
 						  ARRAY_SIZE(partitions));
 		}
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 08c2396..92e1f41 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -72,7 +72,7 @@
 {
 	/* register the flash bank */
 	/* partition the flash bank */
-	return mtd_device_parse_register(p->info, NULL, 0, NULL, 0);
+	return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
 }
 
 static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index fc7d4d0..4a41ced 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -226,7 +226,7 @@
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+	err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0);
 	if (err)
 		goto Error;
 
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 8b54101..e864fc6 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -182,6 +182,9 @@
 {
 	struct flash_platform_data *plat = dev->dev.platform_data;
 	struct ixp4xx_flash_info *info;
+	struct mtd_part_parser_data ppdata = {
+		.origin = dev->resource->start,
+	};
 	int err = -1;
 
 	if (!plat)
@@ -247,7 +250,7 @@
 	/* Use the fast version */
 	info->map.write = ixp4xx_write16;
 
-	err = mtd_device_parse_register(info->mtd, probes, dev->resource->start,
+	err = mtd_device_parse_register(info->mtd, probes, &ppdata,
 			plat->parts, plat->nr_parts);
 	if (err) {
 		printk(KERN_ERR "Could not parse partitions\n");
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index dd0360b..74bd98e 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -27,17 +27,21 @@
 
 
 /* Is this really the vpp port? */
+static DEFINE_SPINLOCK(l440gx_vpp_lock);
+static int l440gx_vpp_refcnt;
 static void l440gx_set_vpp(struct map_info *map, int vpp)
 {
-	unsigned long l;
+	unsigned long flags;
 
-	l = inl(VPP_PORT);
+	spin_lock_irqsave(&l440gx_vpp_lock, flags);
 	if (vpp) {
-		l |= 1;
+		if (++l440gx_vpp_refcnt == 1)   /* first nested 'on' */
+			outl(inl(VPP_PORT) | 1, VPP_PORT);
 	} else {
-		l &= ~1;
+		if (--l440gx_vpp_refcnt == 0)   /* last nested 'off' */
+			outl(inl(VPP_PORT) & ~1, VPP_PORT);
 	}
-	outl(l, VPP_PORT);
+	spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
 }
 
 static struct map_info l440gx_map = {
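
l440gx is the first of four map drivers in this series (pcmciamtd, physmap and sa1100 follow) converted to reference-counted Vpp switching, so nested enable/disable calls from stacked MTD users cannot drop programming power while a write is still in flight. Stripped of the driver specifics, and with demo_vpp_hw_enable()/demo_vpp_hw_disable() standing in as hypothetical hardware hooks, the pattern is:

#include <linux/spinlock.h>
#include <linux/mtd/map.h>

static void demo_vpp_hw_enable(void);	/* hypothetical board-specific hook */
static void demo_vpp_hw_disable(void);	/* hypothetical board-specific hook */

static DEFINE_SPINLOCK(demo_vpp_lock);
static int demo_vpp_refcnt;

static void demo_set_vpp(struct map_info *map, int on)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_vpp_lock, flags);
	if (on) {
		if (++demo_vpp_refcnt == 1)	/* first nested 'on' */
			demo_vpp_hw_enable();
	} else {
		if (--demo_vpp_refcnt == 0)	/* last nested 'off' */
			demo_vpp_hw_disable();
	}
	spin_unlock_irqrestore(&demo_vpp_lock, flags);
}
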
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 7b889de..b5401e3 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -45,6 +45,7 @@
 };
 
 static char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
 
 static map_word
 ltq_read16(struct map_info *map, unsigned long adr)
@@ -168,8 +169,9 @@
 	cfi->addr_unlock1 ^= 1;
 	cfi->addr_unlock2 ^= 1;
 
-	err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0,
-			ltq_mtd_data->parts, ltq_mtd_data->nr_parts);
+	err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
+					ltq_mtd_data->parts,
+					ltq_mtd_data->nr_parts);
 	if (err) {
 		dev_err(&pdev->dev, "failed to add partitions\n");
 		goto err_destroy;
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 8fed58e..3c7ad17 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -199,8 +199,9 @@
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	mtd_device_parse_register(info->mtd, NULL, 0,
-			latch_addr_data->parts, latch_addr_data->nr_parts);
+	mtd_device_parse_register(info->mtd, NULL, NULL,
+				  latch_addr_data->parts,
+				  latch_addr_data->nr_parts);
 	return 0;
 
 iounmap:
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 0259cf5..a3cfad3 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,13 +294,24 @@
 }
 
 
+static DEFINE_SPINLOCK(pcmcia_vpp_lock);
+static int pcmcia_vpp_refcnt;
 static void pcmciamtd_set_vpp(struct map_info *map, int on)
 {
 	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
 	struct pcmcia_device *link = dev->p_dev;
+	unsigned long flags;
 
 	pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
-	pcmcia_fixup_vpp(link, on ? dev->vpp : 0);
+	spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+	if (on) {
+		if (++pcmcia_vpp_refcnt == 1)   /* first nested 'on' */
+			pcmcia_fixup_vpp(link, dev->vpp);
+	} else {
+		if (--pcmcia_vpp_refcnt == 0)   /* last nested 'off' */
+			pcmcia_fixup_vpp(link, 0);
+	}
+	spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
 }
 
 
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index abc5626..21b0b71 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,6 +27,8 @@
 	struct mtd_info		*mtd[MAX_RESOURCES];
 	struct mtd_info		*cmtd;
 	struct map_info		map[MAX_RESOURCES];
+	spinlock_t		vpp_lock;
+	int			vpp_refcnt;
 };
 
 static int physmap_flash_remove(struct platform_device *dev)
@@ -63,12 +65,26 @@
 {
 	struct platform_device *pdev;
 	struct physmap_flash_data *physmap_data;
+	struct physmap_flash_info *info;
+	unsigned long flags;
 
 	pdev = (struct platform_device *)map->map_priv_1;
 	physmap_data = pdev->dev.platform_data;
 
-	if (physmap_data->set_vpp)
-		physmap_data->set_vpp(pdev, state);
+	if (!physmap_data->set_vpp)
+		return;
+
+	info = platform_get_drvdata(pdev);
+
+	spin_lock_irqsave(&info->vpp_lock, flags);
+	if (state) {
+		if (++info->vpp_refcnt == 1)    /* first nested 'on' */
+			physmap_data->set_vpp(pdev, 1);
+	} else {
+		if (--info->vpp_refcnt == 0)    /* last nested 'off' */
+			physmap_data->set_vpp(pdev, 0);
+	}
+	spin_unlock_irqrestore(&info->vpp_lock, flags);
 }
 
 static const char *rom_probe_types[] = {
@@ -172,9 +188,11 @@
 	if (err)
 		goto err_out;
 
+	spin_lock_init(&info->vpp_lock);
+
 	part_types = physmap_data->part_probe_types ? : part_probe_types;
 
-	mtd_device_parse_register(info->cmtd, part_types, 0,
+	mtd_device_parse_register(info->cmtd, part_types, NULL,
 				  physmap_data->parts, physmap_data->nr_parts);
 	return 0;
 
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 45876d0..891558d 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -222,8 +222,9 @@
 	/* check to see if there are any available partitions, or wether
 	 * to add this device whole */
 
-	err = mtd_device_parse_register(info->mtd, pdata->probes, 0,
-			pdata->partitions, pdata->nr_partitions);
+	err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
+					pdata->partitions,
+					pdata->nr_partitions);
 	if (!err)
 		dev_info(&pdev->dev, "registered mtd device\n");
 
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 436d121..81884c2 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,8 @@
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
+	mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
+				  flash->nr_parts);
 
 	platform_set_drvdata(pdev, info);
 	return 0;
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 3da63fc..6f52e1f 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -102,8 +102,8 @@
 	info->mtd->owner = THIS_MODULE;
 	if (err)
 		goto err_out;
-	err = mtd_device_parse_register(info->mtd, NULL, 0,
-			pdata->parts, pdata->nr_parts);
+	err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
+					pdata->nr_parts);
 
 	if (err)
 		goto err_out;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index cbc3b78..a675bdb 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -36,10 +36,22 @@
 	struct sa_subdev_info	subdev[0];
 };
 
+static DEFINE_SPINLOCK(sa1100_vpp_lock);
+static int sa1100_vpp_refcnt;
 static void sa1100_set_vpp(struct map_info *map, int on)
 {
 	struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
-	subdev->plat->set_vpp(on);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sa1100_vpp_lock, flags);
+	if (on) {
+		if (++sa1100_vpp_refcnt == 1)   /* first nested 'on' */
+			subdev->plat->set_vpp(1);
+	} else {
+		if (--sa1100_vpp_refcnt == 0)   /* last nested 'off' */
+			subdev->plat->set_vpp(0);
+	}
+	spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
 }
 
 static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
@@ -252,8 +264,8 @@
 	/*
 	 * Partition selection stuff.
 	 */
-	mtd_device_parse_register(info->mtd, part_probes, 0,
-			plat->parts, plat->nr_parts);
+	mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
+				  plat->nr_parts);
 
 	platform_set_drvdata(pdev, info);
 	err = 0;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 496c407..9d900ad 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -92,8 +92,8 @@
 		mtd_device_register(eprom_mtd, NULL, 0);
 	}
 
-	mtd_device_parse_register(flash_mtd, probes, 0,
-			superh_se_partitions, NUM_PARTITIONS);
+	mtd_device_parse_register(flash_mtd, probes, NULL,
+				  superh_se_partitions, NUM_PARTITIONS);
 
 	return 0;
 }
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 6793074..cfff454 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -85,7 +85,7 @@
 	}
 
 	mtd->owner = THIS_MODULE;
-	mtd->point = uclinux_point;
+	mtd->_point = uclinux_point;
 	mtd->priv = mapp;
 
 	uclinux_ram_mtdinfo = mtd;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 3a04b07..2e2b094 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -360,9 +360,6 @@
 	int index = 0, retval, partition, leftover, numblocks;
 	unsigned char cx;
 
-	if (len < 1)
-		return -EIO;
-
 	mpart = mtd->priv;
 	mdev = mpart->mdev;
 	partition = mpart->partition;
@@ -434,11 +431,6 @@
 	partition = mpart->partition;
 	card = maple_get_drvdata(mdev);
 
-	/* simple sanity checks */
-	if (len < 1) {
-		error = -EIO;
-		goto failed;
-	}
 	numblocks = card->parts[partition].numblocks;
 	if (to + len > numblocks * card->blocklen)
 		len = numblocks * card->blocklen - to;
@@ -544,9 +536,9 @@
 	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
 	mtd_cur->size = part_cur->numblocks * card->blocklen;
 	mtd_cur->erasesize = card->blocklen;
-	mtd_cur->write = vmu_flash_write;
-	mtd_cur->read = vmu_flash_read;
-	mtd_cur->sync = vmu_flash_sync;
+	mtd_cur->_write = vmu_flash_write;
+	mtd_cur->_read = vmu_flash_read;
+	mtd_cur->_sync = vmu_flash_sync;
 	mtd_cur->writesize = card->blocklen;
 
 	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index aa7e0cb..71b0ba7 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -142,7 +142,7 @@
 			nr_parts = ARRAY_SIZE(smallflash_parts);
 		}
 
-		mtd_device_parse_register(sbcmtd[i], part_probes, 0,
+		mtd_device_parse_register(sbcmtd[i], part_probes, NULL,
 					  defparts, nr_parts);
 	}
 	return 0;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 424ca5f..f1f0671 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -233,6 +233,7 @@
 	ret = __get_mtd_device(dev->mtd);
 	if (ret)
 		goto error_release;
+	dev->file_mode = mode;
 
 unlock:
 	dev->open++;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index af65912..6c6d807 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -321,8 +321,12 @@
 	mutex_unlock(&mtdblk->cache_mutex);
 
 	if (!--mtdblk->count) {
-		/* It was the last usage. Free the cache */
-		mtd_sync(mbd->mtd);
+		/*
+		 * It was the last usage. Free the cache, but only sync if
+		 * opened for writing.
+		 */
+		if (mbd->file_mode & FMODE_WRITE)
+			mtd_sync(mbd->mtd);
 		vfree(mtdblk->cache_data);
 	}
 
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index c57ae92..58fc65f 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -39,7 +39,6 @@
 #include <asm/uaccess.h>
 
 static DEFINE_MUTEX(mtd_mutex);
-static struct vfsmount *mtd_inode_mnt __read_mostly;
 
 /*
  * Data structure to hold the pointer to the mtd device as well
@@ -75,7 +74,9 @@
 	return -EINVAL;
 }
 
-
+static int count;
+static struct vfsmount *mnt;
+static struct file_system_type mtd_inodefs_type;
 
 static int mtdchar_open(struct inode *inode, struct file *file)
 {
@@ -92,6 +93,10 @@
 	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
 		return -EACCES;
 
+	ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
+	if (ret)
+		return ret;
+
 	mutex_lock(&mtd_mutex);
 	mtd = get_mtd_device(NULL, devnum);
 
@@ -101,16 +106,14 @@
 	}
 
 	if (mtd->type == MTD_ABSENT) {
-		put_mtd_device(mtd);
 		ret = -ENODEV;
-		goto out;
+		goto out1;
 	}
 
-	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
+	mtd_ino = iget_locked(mnt->mnt_sb, devnum);
 	if (!mtd_ino) {
-		put_mtd_device(mtd);
 		ret = -ENOMEM;
-		goto out;
+		goto out1;
 	}
 	if (mtd_ino->i_state & I_NEW) {
 		mtd_ino->i_private = mtd;
@@ -122,25 +125,28 @@
 
 	/* You can't open it RW if it's not a writeable device */
 	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
-		iput(mtd_ino);
-		put_mtd_device(mtd);
 		ret = -EACCES;
-		goto out;
+		goto out2;
 	}
 
 	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
 	if (!mfi) {
-		iput(mtd_ino);
-		put_mtd_device(mtd);
 		ret = -ENOMEM;
-		goto out;
+		goto out2;
 	}
 	mfi->ino = mtd_ino;
 	mfi->mtd = mtd;
 	file->private_data = mfi;
+	mutex_unlock(&mtd_mutex);
+	return 0;
 
+out2:
+	iput(mtd_ino);
+out1:
+	put_mtd_device(mtd);
 out:
 	mutex_unlock(&mtd_mutex);
+	simple_release_fs(&mnt, &count);
 	return ret;
 } /* mtdchar_open */
 
@@ -162,6 +168,7 @@
 	put_mtd_device(mtd);
 	file->private_data = NULL;
 	kfree(mfi);
+	simple_release_fs(&mnt, &count);
 
 	return 0;
 } /* mtdchar_close */
@@ -405,7 +412,7 @@
 	if (length > 4096)
 		return -EINVAL;
 
-	if (!mtd->write_oob)
+	if (!mtd->_write_oob)
 		ret = -EOPNOTSUPP;
 	else
 		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
@@ -576,7 +583,7 @@
 			!access_ok(VERIFY_READ, req.usr_data, req.len) ||
 			!access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
 		return -EFAULT;
-	if (!mtd->write_oob)
+	if (!mtd->_write_oob)
 		return -EOPNOTSUPP;
 
 	ops.mode = req.mode;
@@ -1175,10 +1182,15 @@
 #endif
 };
 
+static const struct super_operations mtd_ops = {
+	.drop_inode = generic_delete_inode,
+	.statfs = simple_statfs,
+};
+
 static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
 				int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
+	return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
 }
 
 static struct file_system_type mtd_inodefs_type = {
@@ -1187,26 +1199,6 @@
        .kill_sb = kill_anon_super,
 };
 
-static void mtdchar_notify_add(struct mtd_info *mtd)
-{
-}
-
-static void mtdchar_notify_remove(struct mtd_info *mtd)
-{
-	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
-
-	if (mtd_ino) {
-		/* Destroy the inode if it exists */
-		clear_nlink(mtd_ino);
-		iput(mtd_ino);
-	}
-}
-
-static struct mtd_notifier mtdchar_notifier = {
-	.add = mtdchar_notify_add,
-	.remove = mtdchar_notify_remove,
-};
-
 static int __init init_mtdchar(void)
 {
 	int ret;
@@ -1224,19 +1216,8 @@
 		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
 		goto err_unregister_chdev;
 	}
-
-	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
-	if (IS_ERR(mtd_inode_mnt)) {
-		ret = PTR_ERR(mtd_inode_mnt);
-		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
-		goto err_unregister_filesystem;
-	}
-	register_mtd_user(&mtdchar_notifier);
-
 	return ret;
 
-err_unregister_filesystem:
-	unregister_filesystem(&mtd_inodefs_type);
 err_unregister_chdev:
 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 	return ret;
@@ -1244,8 +1225,6 @@
 
 static void __exit cleanup_mtdchar(void)
 {
-	unregister_mtd_user(&mtdchar_notifier);
-	kern_unmount(mtd_inode_mnt);
 	unregister_filesystem(&mtd_inodefs_type);
 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 1ed5103..b900056 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -72,8 +72,6 @@
 	int ret = 0, err;
 	int i;
 
-	*retlen = 0;
-
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		size_t size, retsize;
@@ -126,11 +124,6 @@
 	int err = -EINVAL;
 	int i;
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-
-	*retlen = 0;
-
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		size_t size, retsize;
@@ -145,11 +138,7 @@
 		else
 			size = len;
 
-		if (!(subdev->flags & MTD_WRITEABLE))
-			err = -EROFS;
-		else
-			err = mtd_write(subdev, to, size, &retsize, buf);
-
+		err = mtd_write(subdev, to, size, &retsize, buf);
 		if (err)
 			break;
 
@@ -176,19 +165,10 @@
 	int i;
 	int err = -EINVAL;
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-
-	*retlen = 0;
-
 	/* Calculate total length of data */
 	for (i = 0; i < count; i++)
 		total_len += vecs[i].iov_len;
 
-	/* Do not allow write past end of device */
-	if ((to + total_len) > mtd->size)
-		return -EINVAL;
-
 	/* Check alignment */
 	if (mtd->writesize > 1) {
 		uint64_t __to = to;
@@ -224,12 +204,8 @@
 		old_iov_len = vecs_copy[entry_high].iov_len;
 		vecs_copy[entry_high].iov_len = size;
 
-		if (!(subdev->flags & MTD_WRITEABLE))
-			err = -EROFS;
-		else
-			err = mtd_writev(subdev, &vecs_copy[entry_low],
-					 entry_high - entry_low + 1, to,
-					 &retsize);
+		err = mtd_writev(subdev, &vecs_copy[entry_low],
+				 entry_high - entry_low + 1, to, &retsize);
 
 		vecs_copy[entry_high].iov_len = old_iov_len - size;
 		vecs_copy[entry_high].iov_base += size;
@@ -403,15 +379,6 @@
 	uint64_t length, offset = 0;
 	struct erase_info *erase;
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-
-	if (instr->addr > concat->mtd.size)
-		return -EINVAL;
-
-	if (instr->len + instr->addr > concat->mtd.size)
-		return -EINVAL;
-
 	/*
 	 * Check for proper erase block alignment of the to-be-erased area.
 	 * It is easier to do this based on the super device's erase
@@ -459,8 +426,6 @@
 			return -EINVAL;
 	}
 
-	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
 	/* make a local copy of instr to avoid modifying the caller's struct */
 	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
 
@@ -499,10 +464,6 @@
 		else
 			erase->len = length;
 
-		if (!(subdev->flags & MTD_WRITEABLE)) {
-			err = -EROFS;
-			break;
-		}
 		length -= erase->len;
 		if ((err = concat_dev_erase(subdev, erase))) {
 			/* sanity check: should never happen since
@@ -538,9 +499,6 @@
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
 
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		uint64_t size;
@@ -575,9 +533,6 @@
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = 0;
 
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		uint64_t size;
@@ -650,9 +605,6 @@
 	if (!mtd_can_have_bb(concat->subdev[0]))
 		return res;
 
-	if (ofs > mtd->size)
-		return -EINVAL;
-
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 
@@ -673,12 +625,6 @@
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
 
-	if (!mtd_can_have_bb(concat->subdev[0]))
-		return 0;
-
-	if (ofs > mtd->size)
-		return -EINVAL;
-
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 
@@ -716,10 +662,6 @@
 			continue;
 		}
 
-		/* we've found the subdev over which the mapping will reside */
-		if (offset + len > subdev->size)
-			return (unsigned long) -EINVAL;
-
 		return mtd_get_unmapped_area(subdev, len, offset, flags);
 	}
 
@@ -777,16 +719,16 @@
 	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
 	concat->mtd.oobsize = subdev[0]->oobsize;
 	concat->mtd.oobavail = subdev[0]->oobavail;
-	if (subdev[0]->writev)
-		concat->mtd.writev = concat_writev;
-	if (subdev[0]->read_oob)
-		concat->mtd.read_oob = concat_read_oob;
-	if (subdev[0]->write_oob)
-		concat->mtd.write_oob = concat_write_oob;
-	if (subdev[0]->block_isbad)
-		concat->mtd.block_isbad = concat_block_isbad;
-	if (subdev[0]->block_markbad)
-		concat->mtd.block_markbad = concat_block_markbad;
+	if (subdev[0]->_writev)
+		concat->mtd._writev = concat_writev;
+	if (subdev[0]->_read_oob)
+		concat->mtd._read_oob = concat_read_oob;
+	if (subdev[0]->_write_oob)
+		concat->mtd._write_oob = concat_write_oob;
+	if (subdev[0]->_block_isbad)
+		concat->mtd._block_isbad = concat_block_isbad;
+	if (subdev[0]->_block_markbad)
+		concat->mtd._block_markbad = concat_block_markbad;
 
 	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
 
@@ -833,8 +775,8 @@
 		if (concat->mtd.writesize   !=  subdev[i]->writesize ||
 		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
 		    concat->mtd.oobsize    !=  subdev[i]->oobsize ||
-		    !concat->mtd.read_oob  != !subdev[i]->read_oob ||
-		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
+		    !concat->mtd._read_oob  != !subdev[i]->_read_oob ||
+		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
 			kfree(concat);
 			printk("Incompatible OOB or ECC data on \"%s\"\n",
 			       subdev[i]->name);
@@ -849,15 +791,15 @@
 	concat->num_subdev = num_devs;
 	concat->mtd.name = name;
 
-	concat->mtd.erase = concat_erase;
-	concat->mtd.read = concat_read;
-	concat->mtd.write = concat_write;
-	concat->mtd.sync = concat_sync;
-	concat->mtd.lock = concat_lock;
-	concat->mtd.unlock = concat_unlock;
-	concat->mtd.suspend = concat_suspend;
-	concat->mtd.resume = concat_resume;
-	concat->mtd.get_unmapped_area = concat_get_unmapped_area;
+	concat->mtd._erase = concat_erase;
+	concat->mtd._read = concat_read;
+	concat->mtd._write = concat_write;
+	concat->mtd._sync = concat_sync;
+	concat->mtd._lock = concat_lock;
+	concat->mtd._unlock = concat_unlock;
+	concat->mtd._suspend = concat_suspend;
+	concat->mtd._resume = concat_resume;
+	concat->mtd._get_unmapped_area = concat_get_unmapped_area;
 
 	/*
 	 * Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9a9ce71..c837507 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,7 +107,7 @@
  */
 static void mtd_release(struct device *dev)
 {
-	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
 	dev_t index = MTD_DEVT(mtd->index);
 
 	/* remove /dev/mtdXro node if needed */
@@ -126,7 +126,7 @@
 {
 	struct mtd_info *mtd = dev_get_drvdata(dev);
 
-	if (mtd && mtd->resume)
+	if (mtd)
 		mtd_resume(mtd);
 	return 0;
 }
@@ -610,8 +610,8 @@
 	if (!try_module_get(mtd->owner))
 		return -ENODEV;
 
-	if (mtd->get_device) {
-		err = mtd->get_device(mtd);
+	if (mtd->_get_device) {
+		err = mtd->_get_device(mtd);
 
 		if (err) {
 			module_put(mtd->owner);
@@ -675,14 +675,267 @@
 	--mtd->usecount;
 	BUG_ON(mtd->usecount < 0);
 
-	if (mtd->put_device)
-		mtd->put_device(mtd);
+	if (mtd->_put_device)
+		mtd->_put_device(mtd);
 
 	module_put(mtd->owner);
 }
 EXPORT_SYMBOL_GPL(__put_mtd_device);
 
 /*
+ * Erase is an asynchronous operation.  Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
+		return -EINVAL;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+	if (!instr->len) {
+		instr->state = MTD_ERASE_DONE;
+		mtd_erase_callback(instr);
+		return 0;
+	}
+	return mtd->_erase(mtd, instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
+
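A typical caller turns this asynchronous interface into a synchronous one by
passing a callback that wakes a wait queue and then sleeping until the erase
reaches MTD_ERASE_DONE or MTD_ERASE_FAILED. A minimal sketch (the helper names
are illustrative, not part of this patch):

static void erase_done(struct erase_info *instr)
{
	/* the driver calls this (via mtd_erase_callback) once instr->state is set */
	wake_up((wait_queue_head_t *)instr->priv);
}

static int erase_block_sync(struct mtd_info *mtd, loff_t ofs)
{
	wait_queue_head_t wq;
	struct erase_info instr = {
		.mtd      = mtd,
		.addr     = ofs,
		.len      = mtd->erasesize,
		.callback = erase_done,
	};
	int ret;

	init_waitqueue_head(&wq);
	instr.priv = (u_long)&wq;

	ret = mtd_erase(mtd, &instr);
	if (ret)
		return ret;

	/* sleep until the callback reports completion or failure */
	wait_event(wq, instr.state == MTD_ERASE_DONE ||
		       instr.state == MTD_ERASE_FAILED);
	return instr.state == MTD_ERASE_DONE ? 0 : -EIO;
}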
+/*
+ * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	      void **virt, resource_size_t *phys)
+{
+	*retlen = 0;
+	*virt = NULL;
+	if (phys)
+		*phys = 0;
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint isn't NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+
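For reference, callers normally pair mtd_point() with mtd_unpoint(): map a
region for direct access, copy from the returned virtual address, then release
the mapping. A minimal sketch (the helper name is illustrative):

static int read_via_point(struct mtd_info *mtd, loff_t from, size_t len,
			  u_char *dst)
{
	size_t retlen;
	void *virt;
	int ret;

	/* returns -EOPNOTSUPP when the driver provides no _point method */
	ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);
	if (ret)
		return ret;

	memcpy(dst, virt, retlen);	/* direct access to the mapped flash */
	mtd_unpoint(mtd, from, retlen);
	return 0;
}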
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+				    unsigned long offset, unsigned long flags)
+{
+	if (!mtd->_get_unmapped_area)
+		return -EOPNOTSUPP;
+	if (offset > mtd->size || len > mtd->size - offset)
+		return -EINVAL;
+	return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	     u_char *buf)
+{
+	*retlen = 0;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_read(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read);
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+	      const u_char *buf)
+{
+	*retlen = 0;
+	if (to < 0 || to > mtd->size || len > mtd->size - to)
+		return -EINVAL;
+	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (!len)
+		return 0;
+	return mtd->_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write);
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when it is
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+		    const u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_panic_write)
+		return -EOPNOTSUPP;
+	if (to < 0 || to > mtd->size || len > mtd->size - to)
+		return -EINVAL;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (!len)
+		return 0;
+	return mtd->_panic_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
+ */
+int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+			   size_t len)
+{
+	if (!mtd->_get_fact_prot_info)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_get_fact_prot_info(mtd, buf, len);
+}
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
+
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_read_fact_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
+
+int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+			   size_t len)
+{
+	if (!mtd->_get_user_prot_info)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_get_user_prot_info(mtd, buf, len);
+}
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
+
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_read_user_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
+
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_write_user_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
+
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	if (!mtd->_lock_user_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_lock_user_prot_reg(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_lock)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_lock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_unlock)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_unlock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_is_locked)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->_block_isbad)
+		return 0;
+	if (ofs < 0 || ofs > mtd->size)
+		return -EINVAL;
+	return mtd->_block_isbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->_block_markbad)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size)
+		return -EINVAL;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	return mtd->_block_markbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
+
+/*
  * default_mtd_writev - the default writev method
  * @mtd: mtd device description object pointer
  * @vecs: the vectors to write
@@ -729,9 +982,11 @@
 	       unsigned long count, loff_t to, size_t *retlen)
 {
 	*retlen = 0;
-	if (!mtd->writev)
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (!mtd->_writev)
 		return default_mtd_writev(mtd, vecs, count, to, retlen);
-	return mtd->writev(mtd, vecs, count, to, retlen);
+	return mtd->_writev(mtd, vecs, count, to, retlen);
 }
 EXPORT_SYMBOL_GPL(mtd_writev);
 
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 3ce99e0..ae36d7e 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -169,7 +169,7 @@
 			cxt->nextpage = 0;
 	}
 
-	while (mtd_can_have_bb(mtd)) {
+	while (1) {
 		ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
 		if (!ret)
 			break;
@@ -199,9 +199,9 @@
 		return;
 	}
 
-	if (mtd_can_have_bb(mtd) && ret == -EIO) {
+	if (ret == -EIO) {
 		ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
-		if (ret < 0) {
+		if (ret < 0 && ret != -EOPNOTSUPP) {
 			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
 			return;
 		}
@@ -257,8 +257,7 @@
 	size_t retlen;
 
 	for (page = 0; page < cxt->oops_pages; page++) {
-		if (mtd_can_have_bb(mtd) &&
-		    mtd_block_isbad(mtd, page * record_size))
+		if (mtd_block_isbad(mtd, page * record_size))
 			continue;
 		/* Assume the page is used */
 		mark_page_used(cxt, page);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a3d44c3..9651c06 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -65,12 +65,8 @@
 	int res;
 
 	stats = part->master->ecc_stats;
-
-	if (from >= mtd->size)
-		len = 0;
-	else if (from + len > mtd->size)
-		len = mtd->size - from;
-	res = mtd_read(part->master, from + part->offset, len, retlen, buf);
+	res = part->master->_read(part->master, from + part->offset, len,
+				  retlen, buf);
 	if (unlikely(res)) {
 		if (mtd_is_bitflip(res))
 			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -84,19 +80,16 @@
 		size_t *retlen, void **virt, resource_size_t *phys)
 {
 	struct mtd_part *part = PART(mtd);
-	if (from >= mtd->size)
-		len = 0;
-	else if (from + len > mtd->size)
-		len = mtd->size - from;
-	return mtd_point(part->master, from + part->offset, len, retlen,
-			 virt, phys);
+
+	return part->master->_point(part->master, from + part->offset, len,
+				    retlen, virt, phys);
 }
 
-static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
 	struct mtd_part *part = PART(mtd);
 
-	mtd_unpoint(part->master, from + part->offset, len);
+	return part->master->_unpoint(part->master, from + part->offset, len);
 }
 
 static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -107,7 +100,8 @@
 	struct mtd_part *part = PART(mtd);
 
 	offset += part->offset;
-	return mtd_get_unmapped_area(part->master, len, offset, flags);
+	return part->master->_get_unmapped_area(part->master, len, offset,
+						flags);
 }
 
 static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -138,7 +132,7 @@
 			return -EINVAL;
 	}
 
-	res = mtd_read_oob(part->master, from + part->offset, ops);
+	res = part->master->_read_oob(part->master, from + part->offset, ops);
 	if (unlikely(res)) {
 		if (mtd_is_bitflip(res))
 			mtd->ecc_stats.corrected++;
@@ -152,55 +146,46 @@
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_read_user_prot_reg(part->master, from, len,
+						 retlen, buf);
 }
 
 static int part_get_user_prot_info(struct mtd_info *mtd,
 		struct otp_info *buf, size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_get_user_prot_info(part->master, buf, len);
+	return part->master->_get_user_prot_info(part->master, buf, len);
 }
 
 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_read_fact_prot_reg(part->master, from, len,
+						 retlen, buf);
 }
 
 static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
 		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_get_fact_prot_info(part->master, buf, len);
+	return part->master->_get_fact_prot_info(part->master, buf, len);
 }
 
 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
 		size_t *retlen, const u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (to >= mtd->size)
-		len = 0;
-	else if (to + len > mtd->size)
-		len = mtd->size - to;
-	return mtd_write(part->master, to + part->offset, len, retlen, buf);
+	return part->master->_write(part->master, to + part->offset, len,
+				    retlen, buf);
 }
 
 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
 		size_t *retlen, const u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (to >= mtd->size)
-		len = 0;
-	else if (to + len > mtd->size)
-		len = mtd->size - to;
-	return mtd_panic_write(part->master, to + part->offset, len, retlen,
-			       buf);
+	return part->master->_panic_write(part->master, to + part->offset, len,
+					  retlen, buf);
 }
 
 static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -208,50 +193,43 @@
 {
 	struct mtd_part *part = PART(mtd);
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-
 	if (to >= mtd->size)
 		return -EINVAL;
 	if (ops->datbuf && to + ops->len > mtd->size)
 		return -EINVAL;
-	return mtd_write_oob(part->master, to + part->offset, ops);
+	return part->master->_write_oob(part->master, to + part->offset, ops);
 }
 
 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_write_user_prot_reg(part->master, from, len,
+						  retlen, buf);
 }
 
 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_lock_user_prot_reg(part->master, from, len);
+	return part->master->_lock_user_prot_reg(part->master, from, len);
 }
 
 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
 		unsigned long count, loff_t to, size_t *retlen)
 {
 	struct mtd_part *part = PART(mtd);
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	return mtd_writev(part->master, vecs, count, to + part->offset,
-			  retlen);
+	return part->master->_writev(part->master, vecs, count,
+				     to + part->offset, retlen);
 }
 
 static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	struct mtd_part *part = PART(mtd);
 	int ret;
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (instr->addr >= mtd->size)
-		return -EINVAL;
+
 	instr->addr += part->offset;
-	ret = mtd_erase(part->master, instr);
+	ret = part->master->_erase(part->master, instr);
 	if (ret) {
 		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
@@ -262,7 +240,7 @@
 
 void mtd_erase_callback(struct erase_info *instr)
 {
-	if (instr->mtd->erase == part_erase) {
+	if (instr->mtd->_erase == part_erase) {
 		struct mtd_part *part = PART(instr->mtd);
 
 		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
@@ -277,52 +255,44 @@
 static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-	return mtd_lock(part->master, ofs + part->offset, len);
+	return part->master->_lock(part->master, ofs + part->offset, len);
 }
 
 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-	return mtd_unlock(part->master, ofs + part->offset, len);
+	return part->master->_unlock(part->master, ofs + part->offset, len);
 }
 
 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-	return mtd_is_locked(part->master, ofs + part->offset, len);
+	return part->master->_is_locked(part->master, ofs + part->offset, len);
 }
 
 static void part_sync(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
-	mtd_sync(part->master);
+	part->master->_sync(part->master);
 }
 
 static int part_suspend(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_suspend(part->master);
+	return part->master->_suspend(part->master);
 }
 
 static void part_resume(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
-	mtd_resume(part->master);
+	part->master->_resume(part->master);
 }
 
 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct mtd_part *part = PART(mtd);
-	if (ofs >= mtd->size)
-		return -EINVAL;
 	ofs += part->offset;
-	return mtd_block_isbad(part->master, ofs);
+	return part->master->_block_isbad(part->master, ofs);
 }
 
 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -330,12 +300,8 @@
 	struct mtd_part *part = PART(mtd);
 	int res;
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (ofs >= mtd->size)
-		return -EINVAL;
 	ofs += part->offset;
-	res = mtd_block_markbad(part->master, ofs);
+	res = part->master->_block_markbad(part->master, ofs);
 	if (!res)
 		mtd->ecc_stats.badblocks++;
 	return res;
@@ -410,54 +376,55 @@
 	 */
 	slave->mtd.dev.parent = master->dev.parent;
 
-	slave->mtd.read = part_read;
-	slave->mtd.write = part_write;
+	slave->mtd._read = part_read;
+	slave->mtd._write = part_write;
 
-	if (master->panic_write)
-		slave->mtd.panic_write = part_panic_write;
+	if (master->_panic_write)
+		slave->mtd._panic_write = part_panic_write;
 
-	if (master->point && master->unpoint) {
-		slave->mtd.point = part_point;
-		slave->mtd.unpoint = part_unpoint;
+	if (master->_point && master->_unpoint) {
+		slave->mtd._point = part_point;
+		slave->mtd._unpoint = part_unpoint;
 	}
 
-	if (master->get_unmapped_area)
-		slave->mtd.get_unmapped_area = part_get_unmapped_area;
-	if (master->read_oob)
-		slave->mtd.read_oob = part_read_oob;
-	if (master->write_oob)
-		slave->mtd.write_oob = part_write_oob;
-	if (master->read_user_prot_reg)
-		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
-	if (master->read_fact_prot_reg)
-		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
-	if (master->write_user_prot_reg)
-		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
-	if (master->lock_user_prot_reg)
-		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
-	if (master->get_user_prot_info)
-		slave->mtd.get_user_prot_info = part_get_user_prot_info;
-	if (master->get_fact_prot_info)
-		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
-	if (master->sync)
-		slave->mtd.sync = part_sync;
-	if (!partno && !master->dev.class && master->suspend && master->resume) {
-			slave->mtd.suspend = part_suspend;
-			slave->mtd.resume = part_resume;
+	if (master->_get_unmapped_area)
+		slave->mtd._get_unmapped_area = part_get_unmapped_area;
+	if (master->_read_oob)
+		slave->mtd._read_oob = part_read_oob;
+	if (master->_write_oob)
+		slave->mtd._write_oob = part_write_oob;
+	if (master->_read_user_prot_reg)
+		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
+	if (master->_read_fact_prot_reg)
+		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
+	if (master->_write_user_prot_reg)
+		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
+	if (master->_lock_user_prot_reg)
+		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
+	if (master->_get_user_prot_info)
+		slave->mtd._get_user_prot_info = part_get_user_prot_info;
+	if (master->_get_fact_prot_info)
+		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
+	if (master->_sync)
+		slave->mtd._sync = part_sync;
+	if (!partno && !master->dev.class && master->_suspend &&
+	    master->_resume) {
+			slave->mtd._suspend = part_suspend;
+			slave->mtd._resume = part_resume;
 	}
-	if (master->writev)
-		slave->mtd.writev = part_writev;
-	if (master->lock)
-		slave->mtd.lock = part_lock;
-	if (master->unlock)
-		slave->mtd.unlock = part_unlock;
-	if (master->is_locked)
-		slave->mtd.is_locked = part_is_locked;
-	if (master->block_isbad)
-		slave->mtd.block_isbad = part_block_isbad;
-	if (master->block_markbad)
-		slave->mtd.block_markbad = part_block_markbad;
-	slave->mtd.erase = part_erase;
+	if (master->_writev)
+		slave->mtd._writev = part_writev;
+	if (master->_lock)
+		slave->mtd._lock = part_lock;
+	if (master->_unlock)
+		slave->mtd._unlock = part_unlock;
+	if (master->_is_locked)
+		slave->mtd._is_locked = part_is_locked;
+	if (master->_block_isbad)
+		slave->mtd._block_isbad = part_block_isbad;
+	if (master->_block_markbad)
+		slave->mtd._block_markbad = part_block_markbad;
+	slave->mtd._erase = part_erase;
 	slave->master = master;
 	slave->offset = part->offset;
 
@@ -549,7 +516,8 @@
 	}
 
 	slave->mtd.ecclayout = master->ecclayout;
-	if (master->block_isbad) {
+	slave->mtd.ecc_strength = master->ecc_strength;
+	if (master->_block_isbad) {
 		uint64_t offs = 0;
 
 		while (offs < slave->mtd.size) {
@@ -761,7 +729,7 @@
 	for ( ; ret <= 0 && *types; types++) {
 		parser = get_partition_parser(*types);
 		if (!parser && !request_module("%s", *types))
-				parser = get_partition_parser(*types);
+			parser = get_partition_parser(*types);
 		if (!parser)
 			continue;
 		ret = (*parser->parse_fn)(master, pparts, data);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index a3c4de5..7d17cec 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -314,6 +314,26 @@
 	  load time (assuming you build diskonchip as a module) with the module
 	  parameter "inftl_bbt_write=1".
 
+config MTD_NAND_DOCG4
+	tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select BCH
+	select BITREVERSE
+	help
+	  Support for diskonchip G4 nand flash, found in various smartphones and
+	  PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
+	  Portege G900, Asus P526, and O2 XDA Zinc.
+
+	  With this driver you will be able to use UBI and create a ubifs on the
+	  device, so you may wish to consider enabling UBI and UBIFS as well.
+
+	  These devices ship with the M-Sys/Sandisk SAFTL formatting, for which
+	  there is currently no mtd parser, so you may want to use command line
+	  partitioning to segregate write-protected blocks. On the Treo680, the
+	  first five erase blocks (256KiB each) are write-protected, followed
+	  by the block containing the saftl partition table.  This is probably
+	  typical.
+
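As a concrete illustration of the command-line partitioning suggested above,
the five protected 256KiB blocks plus the partition-table block add up to
1536KiB, so a boot argument along the following lines would keep them out of
the writable area (the mtd device name and partition names are hypothetical,
not defined by this patch):

    mtdparts=<docg4-device>:1536k(protected)ro,-(filesystem)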
 config MTD_NAND_SHARPSL
 	tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
 	depends on ARCH_PXA
@@ -421,7 +441,6 @@
 config MTD_NAND_GPMI_NAND
         bool "GPMI NAND Flash Controller driver"
         depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
-	select MTD_CMDLINE_PARTS
         help
 	 Enables NAND Flash support for IMX23 or IMX28.
 	 The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19bc8cb..d4b4d87 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_MTD_NAND_S3C2410)		+= s3c2410.o
 obj-$(CONFIG_MTD_NAND_DAVINCI)		+= davinci_nand.o
 obj-$(CONFIG_MTD_NAND_DISKONCHIP)	+= diskonchip.o
+obj-$(CONFIG_MTD_NAND_DOCG4)		+= docg4.o
 obj-$(CONFIG_MTD_NAND_FSMC)		+= fsmc_nand.o
 obj-$(CONFIG_MTD_NAND_H1900)		+= h1910.o
 obj-$(CONFIG_MTD_NAND_RTC_FROM4)	+= rtc_from4.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 6a5ff64..4f20e1d 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -585,12 +585,13 @@
 	mtd->writesize = 1<<card->pageshift;
 	mtd->type = MTD_NANDFLASH;
 	mtd->flags = MTD_CAP_NANDFLASH;
-	mtd->read = alauda_read;
-	mtd->write = alauda_write;
-	mtd->erase = alauda_erase;
-	mtd->block_isbad = alauda_isbad;
+	mtd->_read = alauda_read;
+	mtd->_write = alauda_write;
+	mtd->_erase = alauda_erase;
+	mtd->_block_isbad = alauda_isbad;
 	mtd->priv = al;
 	mtd->owner = THIS_MODULE;
+	mtd->ecc_strength = 1;
 
 	err = mtd_device_register(mtd, NULL, 0);
 	if (err) {
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index ae7e37d..2165576 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -603,6 +603,7 @@
 		nand_chip->ecc.hwctl = atmel_nand_hwctl;
 		nand_chip->ecc.read_page = atmel_nand_read_page;
 		nand_chip->ecc.bytes = 4;
+		nand_chip->ecc.strength = 1;
 	}
 
 	nand_chip->chip_delay = 20;		/* 20us command delay time */
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 64c9cba..6908cdd 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -475,6 +475,14 @@
 			largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
 		this->badblock_pattern = &largepage_bbt;
 	}
+
+	/*
+	 * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
+	 * conservative guess, given 13 ecc bytes and using bch alg.
+	 * (Assume Galois field order m=15 to allow a margin of error.)
+	 */
+	this->ecc.strength = 6;
+
 #endif
 
 	/* Now finish off the scan, now that ecc.layout has been initialized. */
@@ -487,7 +495,7 @@
 
 	/* Register the partitions */
 	board_mtd->name = "bcm_umi-nand";
-	mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0);
+	mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0);
 
 	/* Return happy */
 	return 0;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index dd899cb..d7b86b9 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -702,9 +702,11 @@
 		if (likely(mtd->writesize >= 512)) {
 			chip->ecc.size = 512;
 			chip->ecc.bytes = 6;
+			chip->ecc.strength = 2;
 		} else {
 			chip->ecc.size = 256;
 			chip->ecc.bytes = 3;
+			chip->ecc.strength = 1;
 			bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
 			SSYNC();
 		}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 72d3f23..2a96e1a 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -783,6 +783,7 @@
 	cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
 	cafe->nand.ecc.size = mtd->writesize;
 	cafe->nand.ecc.bytes = 14;
+	cafe->nand.ecc.strength = 4;
 	cafe->nand.ecc.hwctl  = (void *)cafe_nand_bug;
 	cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
 	cafe->nand.ecc.correct  = (void *)cafe_nand_bug;
@@ -799,7 +800,7 @@
 	pci_set_drvdata(pdev, mtd);
 
 	mtd->name = "cafe_nand";
-	mtd_device_parse_register(mtd, part_probes, 0, NULL, 0);
+	mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
 
 	goto out;
 
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 737ef9a..1024bfc 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -219,7 +219,7 @@
 	}
 
 	/* Register the partitions */
-	ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0,
+	ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
 					partition_info, NUM_PARTITIONS);
 	if (ret)
 		goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 414afa7..821c34c 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -248,6 +248,8 @@
 		goto out_ior;
 	}
 
+	this->ecc.strength = 1;
+
 	new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
 
 	cs553x_mtd[cs] = new_mtd;
@@ -313,7 +315,7 @@
 	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
 		if (cs553x_mtd[i]) {
 			/* If any devices registered, return success. Else the last error. */
-			mtd_device_parse_register(cs553x_mtd[i], NULL, 0,
+			mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
 						  NULL, 0);
 			err = 0;
 		}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 6e56615..d94b03c 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -641,6 +641,7 @@
 			info->chip.ecc.bytes = 3;
 		}
 		info->chip.ecc.size = 512;
+		info->chip.ecc.strength = pdata->ecc_bits;
 		break;
 	default:
 		ret = -EINVAL;
@@ -752,8 +753,8 @@
 	if (ret < 0)
 		goto err_scan;
 
-	ret = mtd_device_parse_register(&info->mtd, NULL, 0,
-			pdata->parts, pdata->nr_parts);
+	ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+					pdata->nr_parts);
 
 	if (ret < 0)
 		goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3984d48..a9e57d6 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1590,6 +1590,7 @@
 			ECC_15BITS * (denali->mtd.writesize /
 			ECC_SECTOR_SIZE)))) {
 		/* if MLC OOB size is large enough, use 15bit ECC*/
+		denali->nand.ecc.strength = 15;
 		denali->nand.ecc.layout = &nand_15bit_oob;
 		denali->nand.ecc.bytes = ECC_15BITS;
 		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
@@ -1600,12 +1601,14 @@
 				" contain 8bit ECC correction codes");
 		goto failed_req_irq;
 	} else {
+		denali->nand.ecc.strength = 8;
 		denali->nand.ecc.layout = &nand_8bit_oob;
 		denali->nand.ecc.bytes = ECC_8BITS;
 		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
 	}
 
 	denali->nand.ecc.bytes *= denali->devnum;
+	denali->nand.ecc.strength *= denali->devnum;
 	denali->nand.ecc.layout->eccbytes *=
 		denali->mtd.writesize / ECC_SECTOR_SIZE;
 	denali->nand.ecc.layout->oobfree[0].offset =
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index df921e7..e2ca067 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1653,6 +1653,7 @@
 	nand->ecc.mode		= NAND_ECC_HW_SYNDROME;
 	nand->ecc.size		= 512;
 	nand->ecc.bytes		= 6;
+	nand->ecc.strength	= 2;
 	nand->bbt_options	= NAND_BBT_USE_FLASH;
 
 	doc->physadr		= physadr;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
new file mode 100644
index 0000000..b082026
--- /dev/null
+++ b/drivers/mtd/nand/docg4.c
@@ -0,0 +1,1377 @@
+/*
+ *  Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
+ *
+ * mtd nand driver for M-Systems DiskOnChip G4
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Tested on the Palm Treo 680.  The G4 is also present on Toshiba Portege, Asus
+ * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
+ * Should work on these as well.  Let me know!
+ *
+ * TODO:
+ *
+ *  Mechanism for management of password-protected areas
+ *
+ *  Hamming ecc when reading oob only
+ *
+ *  According to the M-Sys documentation, this device is also available in a
+ *  "dual-die" configuration having a 256MB capacity, but no mechanism for
+ *  detecting this variant is documented.  Currently this driver assumes 128MB
+ *  capacity.
+ *
+ *  Support for multiple cascaded devices ("floors").  Not sure which gadgets
+ *  contain multiple G4s in a cascaded configuration, if any.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+#include <linux/bitrev.h>
+
+/*
+ * You'll want to ignore badblocks if you're reading a partition that contains
+ * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
+ * it does not use mtd nand's method for marking bad blocks (using oob area).
+ * This will also skip the check of the "page written" flag.
+ */
+static bool ignore_badblocks;
+module_param(ignore_badblocks, bool, 0);
+MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
+
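When the driver is built as a module, this check can therefore be disabled at
load time, e.g.:

    modprobe docg4 ignore_badblocks=1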
+struct docg4_priv {
+	struct mtd_info	*mtd;
+	struct device *dev;
+	void __iomem *virtadr;
+	int status;
+	struct {
+		unsigned int command;
+		int column;
+		int page;
+	} last_command;
+	uint8_t oob_buf[16];
+	uint8_t ecc_buf[7];
+	int oob_page;
+	struct bch_control *bch;
+};
+
+/*
+ * Defines prefixed with DOCG4 are unique to the diskonchip G4.  All others are
+ * shared with other diskonchip devices (P3, G3 at least).
+ *
+ * Functions with names prefixed with docg4_ are mtd / nand interface functions
+ * (though they may also be called internally).  All others are internal.
+ */
+
+#define DOC_IOSPACE_DATA		0x0800
+
+/* register offsets */
+#define DOC_CHIPID			0x1000
+#define DOC_DEVICESELECT		0x100a
+#define DOC_ASICMODE			0x100c
+#define DOC_DATAEND			0x101e
+#define DOC_NOP				0x103e
+
+#define DOC_FLASHSEQUENCE		0x1032
+#define DOC_FLASHCOMMAND		0x1034
+#define DOC_FLASHADDRESS		0x1036
+#define DOC_FLASHCONTROL		0x1038
+#define DOC_ECCCONF0			0x1040
+#define DOC_ECCCONF1			0x1042
+#define DOC_HAMMINGPARITY		0x1046
+#define DOC_BCH_SYNDROM(idx)		(0x1048 + idx)
+
+#define DOC_ASICMODECONFIRM		0x1072
+#define DOC_CHIPID_INV			0x1074
+#define DOC_POWERMODE			0x107c
+
+#define DOCG4_MYSTERY_REG		0x1050
+
+/* apparently used only to write oob bytes 6 and 7 */
+#define DOCG4_OOB_6_7			0x1052
+
+/* DOC_FLASHSEQUENCE register commands */
+#define DOC_SEQ_RESET			0x00
+#define DOCG4_SEQ_PAGE_READ		0x03
+#define DOCG4_SEQ_FLUSH			0x29
+#define DOCG4_SEQ_PAGEWRITE		0x16
+#define DOCG4_SEQ_PAGEPROG		0x1e
+#define DOCG4_SEQ_BLOCKERASE		0x24
+
+/* DOC_FLASHCOMMAND register commands */
+#define DOCG4_CMD_PAGE_READ             0x00
+#define DOC_CMD_ERASECYCLE2		0xd0
+#define DOCG4_CMD_FLUSH                 0x70
+#define DOCG4_CMD_READ2                 0x30
+#define DOC_CMD_PROG_BLOCK_ADDR		0x60
+#define DOCG4_CMD_PAGEWRITE		0x80
+#define DOC_CMD_PROG_CYCLE2		0x10
+#define DOC_CMD_RESET			0xff
+
+/* DOC_POWERMODE register bits */
+#define DOC_POWERDOWN_READY		0x80
+
+/* DOC_FLASHCONTROL register bits */
+#define DOC_CTRL_CE			0x10
+#define DOC_CTRL_UNKNOWN		0x40
+#define DOC_CTRL_FLASHREADY		0x01
+
+/* DOC_ECCCONF0 register bits */
+#define DOC_ECCCONF0_READ_MODE		0x8000
+#define DOC_ECCCONF0_UNKNOWN		0x2000
+#define DOC_ECCCONF0_ECC_ENABLE	        0x1000
+#define DOC_ECCCONF0_DATA_BYTES_MASK	0x07ff
+
+/* DOC_ECCCONF1 register bits */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR	0x80
+#define DOC_ECCCONF1_ECC_ENABLE         0x07
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN	0x20
+
+/* DOC_ASICMODE register bits */
+#define DOC_ASICMODE_RESET		0x00
+#define DOC_ASICMODE_NORMAL		0x01
+#define DOC_ASICMODE_POWERDOWN		0x02
+#define DOC_ASICMODE_MDWREN		0x04
+#define DOC_ASICMODE_BDETCT_RESET	0x08
+#define DOC_ASICMODE_RSTIN_RESET	0x10
+#define DOC_ASICMODE_RAM_WE		0x20
+
+/* good status values read after read/write/erase operations */
+#define DOCG4_PROGSTATUS_GOOD          0x51
+#define DOCG4_PROGSTATUS_GOOD_2        0xe0
+
+/*
+ * On read operations (page and oob-only), the first byte read from I/O reg is a
+ * status.  On error, it reads 0x73; otherwise, it reads either 0x71 (first read
+ * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
+ */
+#define DOCG4_READ_ERROR           0x02 /* bit 1 indicates read error */
+
+/* anatomy of the device */
+#define DOCG4_CHIP_SIZE        0x8000000
+#define DOCG4_PAGE_SIZE        0x200
+#define DOCG4_PAGES_PER_BLOCK  0x200
+#define DOCG4_BLOCK_SIZE       (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
+#define DOCG4_NUMBLOCKS        (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
+#define DOCG4_OOB_SIZE         0x10
+#define DOCG4_CHIP_SHIFT       27    /* log_2(DOCG4_CHIP_SIZE) */
+#define DOCG4_PAGE_SHIFT       9     /* log_2(DOCG4_PAGE_SIZE) */
+#define DOCG4_ERASE_SHIFT      18    /* log_2(DOCG4_BLOCK_SIZE) */
+
+/* all but the last byte is included in ecc calculation */
+#define DOCG4_BCH_SIZE         (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
+
+#define DOCG4_USERDATA_LEN     520 /* 512 byte page plus 8 oob avail to user */
+
+/* expected values from the ID registers */
+#define DOCG4_IDREG1_VALUE     0x0400
+#define DOCG4_IDREG2_VALUE     0xfbff
+
+/* primitive polynomial used to build the Galois field used by hw ecc gen */
+#define DOCG4_PRIMITIVE_POLY   0x4443
+
+#define DOCG4_M                14  /* Galois field is of order 2^14 */
+#define DOCG4_T                4   /* BCH alg corrects up to 4 bit errors */
+
+#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+
+/*
+ * Oob bytes 0 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 bytes.  Bytes 8 - 14 are hw-generated ecc.
+ * Byte 15 (the last) is used by the driver as a "page written" flag.
+ */
+static struct nand_ecclayout docg4_oobinfo = {
+	.eccbytes = 9,
+	.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
+	.oobavail = 7,
+	.oobfree = { {0, 7} }
+};
+
+/*
+ * The device has a nop register which M-Sys claims is for the purpose of
+ * inserting precise delays.  But beware; at least some operations fail if the
+ * nop writes are replaced with a generic delay!
+ */
+static inline void write_nop(void __iomem *docptr)
+{
+	writew(0, docptr + DOC_NOP);
+}
+
+static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *nand = mtd->priv;
+	uint16_t *p = (uint16_t *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++)
+		p[i] = readw(nand->IO_ADDR_R);
+}
+
+static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *nand = mtd->priv;
+	uint16_t *p = (uint16_t *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++)
+		writew(p[i], nand->IO_ADDR_W);
+}
+
+static int poll_status(struct docg4_priv *doc)
+{
+	/*
+	 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
+	 * register.  Operations known to take a long time (e.g., block erase)
+	 * should sleep for a while before calling this.
+	 */
+
+	uint16_t flash_status;
+	unsigned int timeo;
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	/* hardware quirk requires reading twice initially */
+	flash_status = readw(docptr + DOC_FLASHCONTROL);
+
+	timeo = 1000;
+	do {
+		cpu_relax();
+		flash_status = readb(docptr + DOC_FLASHCONTROL);
+	} while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
+
+
+	if (!timeo) {
+		dev_err(doc->dev, "%s: timed out!\n", __func__);
+		return NAND_STATUS_FAIL;
+	}
+
+	if (unlikely(timeo < 50))
+		dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
+			 __func__, timeo);
+
+	return 0;
+}
+
+
+static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
+{
+
+	struct docg4_priv *doc = nand->priv;
+	int status = NAND_STATUS_WP;       /* inverse logic?? */
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	/* report any previously unreported error */
+	if (doc->status) {
+		status |= doc->status;
+		doc->status = 0;
+		return status;
+	}
+
+	status |= poll_status(doc);
+	return status;
+}
+
+static void docg4_select_chip(struct mtd_info *mtd, int chip)
+{
+	/*
+	 * Select among multiple cascaded chips ("floors").  Multiple floors are
+	 * not yet supported, so the only valid non-negative value is 0.
+	 */
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
+
+	if (chip < 0)
+		return;		/* deselected */
+
+	if (chip > 0)
+		dev_warn(doc->dev, "multiple floors currently unsupported\n");
+
+	writew(0, docptr + DOC_DEVICESELECT);
+}
+
+static void reset(struct mtd_info *mtd)
+{
+	/* full device reset */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+
+	writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
+	       docptr + DOC_ASICMODE);
+	writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
+	       docptr + DOC_ASICMODECONFIRM);
+	write_nop(docptr);
+
+	writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
+	       docptr + DOC_ASICMODE);
+	writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
+	       docptr + DOC_ASICMODECONFIRM);
+
+	writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
+
+	poll_status(doc);
+}
+
+static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
+{
+	/* read the 7 hw-generated ecc bytes */
+
+	int i;
+	for (i = 0; i < 7; i++) { /* hw quirk; read twice */
+		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+	}
+}
+
+static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+	/*
+	 * Called after a page read when hardware reports bitflips.
+	 * Up to four bitflips can be corrected.
+	 */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+	int i, numerrs, errpos[4];
+	const uint8_t blank_read_hwecc[8] = {
+		0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
+
+	read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
+
+	/* check if read error is due to a blank page */
+	if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
+		return 0;	/* yes */
+
+	/* skip additional check of "written flag" if ignore_badblocks */
+	if (ignore_badblocks == false) {
+
+		/*
+		 * If the hw ecc bytes are not those of a blank page, there's
+		 * still a chance that the page is blank, but was read with
+		 * errors.  Check the "written flag" in last oob byte, which
+		 * is set to zero when a page is written.  If more than half
+		 * the bits are set, assume a blank page.  Unfortunately, the
+		 * bit flip(s) are not reported in stats.
+		 */
+
+		if (doc->oob_buf[15]) {
+			int bit, numsetbits = 0;
+			unsigned long written_flag = doc->oob_buf[15];
+			for_each_set_bit(bit, &written_flag, 8)
+				numsetbits++;
+			if (numsetbits > 4) { /* assume blank */
+				dev_warn(doc->dev,
+					 "error(s) in blank page "
+					 "at offset %08x\n",
+					 page * DOCG4_PAGE_SIZE);
+				return 0;
+			}
+		}
+	}
+
+	/*
+	 * The hardware ecc unit produces oob_ecc ^ calc_ecc.  The kernel's bch
+	 * algorithm is used to decode this.  However the hw operates on page
+	 * data in a bit order that is the reverse of that of the bch alg,
+	 * requiring that the bits be reversed on the result.  Thanks to Ivan
+	 * Djelic for his analysis!
+	 */
+	for (i = 0; i < 7; i++)
+		doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
+
+	numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
+			     doc->ecc_buf, NULL, errpos);
+
+	if (numerrs == -EBADMSG) {
+		dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
+			 page * DOCG4_PAGE_SIZE);
+		return -EBADMSG;
+	}
+
+	BUG_ON(numerrs < 0);	/* -EINVAL, or anything other than -EBADMSG */
+
+	/* undo last step in BCH alg (modulo mirroring not needed) */
+	for (i = 0; i < numerrs; i++)
+		errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
+
+	/* fix the errors */
+	for (i = 0; i < numerrs; i++) {
+
+		/* ignore if error within oob ecc bytes */
+		if (errpos[i] > DOCG4_USERDATA_LEN * 8)
+			continue;
+
+		/* if error within oob area preceding ecc bytes... */
+		if (errpos[i] > DOCG4_PAGE_SIZE * 8)
+			change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
+				   (unsigned long *)doc->oob_buf);
+
+		else    /* error in page data */
+			change_bit(errpos[i], (unsigned long *)buf);
+	}
+
+	dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
+		   numerrs, page * DOCG4_PAGE_SIZE);
+
+	return numerrs;
+}
+
+static uint8_t docg4_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+
+	dev_dbg(doc->dev, "%s\n", __func__);
+
+	if (doc->last_command.command == NAND_CMD_STATUS) {
+		int status;
+
+		/*
+		 * Previous nand command was status request, so nand
+		 * infrastructure code expects to read the status here.  If an
+		 * error occurred in a previous operation, report it.
+		 */
+		doc->last_command.command = 0;
+
+		if (doc->status) {
+			status = doc->status;
+			doc->status = 0;
+		}
+
+		/* why is NAND_STATUS_WP inverse logic?? */
+		else
+			status = NAND_STATUS_WP | NAND_STATUS_READY;
+
+		return status;
+	}
+
+	dev_warn(doc->dev, "unexpected call to read_byte()\n");
+
+	return 0;
+}
+
+static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
+{
+	/* write the four address bytes packed in docg4_addr to the device */
+
+	void __iomem *docptr = doc->virtadr;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+	docg4_addr >>= 8;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+	docg4_addr >>= 8;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+	docg4_addr >>= 8;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+}
+
+static int read_progstatus(struct docg4_priv *doc)
+{
+	/*
+	 * This apparently checks the status of programming.  Done after an
+	 * erasure, and after page data is written.  On error, the status is
+	 * saved, to be later retrieved by the nand infrastructure code.
+	 */
+	void __iomem *docptr = doc->virtadr;
+
+	/* status is read from the I/O reg */
+	uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
+	uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
+	uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
+
+	dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
+	      __func__, status1, status2, status3);
+
+	if (status1 != DOCG4_PROGSTATUS_GOOD
+	    || status2 != DOCG4_PROGSTATUS_GOOD_2
+	    || status3 != DOCG4_PROGSTATUS_GOOD_2) {
+		doc->status = NAND_STATUS_FAIL;
+		dev_warn(doc->dev, "read_progstatus failed: "
+			 "%02x, %02x, %02x\n", status1, status2, status3);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int pageprog(struct mtd_info *mtd)
+{
+	/*
+	 * Final step in writing a page.  Writes the contents of its
+	 * internal buffer out to the flash array, or some such.
+	 */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+	int retval = 0;
+
+	dev_dbg(doc->dev, "docg4: %s\n", __func__);
+
+	writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
+	writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* Just busy-wait; usleep_range() slows things down noticeably. */
+	poll_status(doc);
+
+	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	retval = read_progstatus(doc);
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+	poll_status(doc);
+	write_nop(docptr);
+
+	return retval;
+}
+
+static void sequence_reset(struct mtd_info *mtd)
+{
+	/* common starting sequence for all operations */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+
+	writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
+	writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
+	writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+	poll_status(doc);
+	write_nop(docptr);
+}
+
+static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+	/* first step in reading a page */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev,
+	      "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
+
+	sequence_reset(mtd);
+
+	writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+
+	write_addr(doc, docg4_addr);
+
+	write_nop(docptr);
+	writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	poll_status(doc);
+}
+
+static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+	/* first step in writing a page */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev,
+	      "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
+	sequence_reset(mtd);
+	writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_addr(doc, docg4_addr);
+	write_nop(docptr);
+	write_nop(docptr);
+	poll_status(doc);
+}
+
+static uint32_t mtd_to_docg4_address(int page, int column)
+{
+	/*
+	 * Convert mtd address to format used by the device, 32 bit packed.
+	 *
+	 * Some notes on G4 addressing... The M-Sys documentation on this device
+	 * claims that pages are 2K in length, and indeed, the format of the
+	 * address used by the device reflects that.  But within each page are
+	 * four 512 byte "sub-pages", each with its own oob data that is
+	 * read/written immediately after the 512 bytes of page data.  This oob
+	 * data contains the ecc bytes for the preceding 512 bytes.
+	 *
+	 * Rather than tell the mtd nand infrastructure that page size is 2k,
+	 * with four sub-pages each, we engage in a little subterfuge and tell
+	 * the infrastructure code that pages are 512 bytes in size.  This is
+	 * done because during the course of reverse-engineering the device, I
+	 * never observed an instance where an entire 2K "page" was read or
+	 * written as a unit.  Each "sub-page" is always addressed individually,
+	 * its data read/written, and ecc handled before the next "sub-page" is
+	 * addressed.
+	 *
+	 * This requires us to convert addresses passed by the mtd nand
+	 * infrastructure code to those used by the device.
+	 *
+	 * The address that is written to the device consists of four bytes: the
+	 * first two are the 2k page number, and the last two are the index into
+	 * the page.  The index is in terms of 16-bit half-words and includes
+	 * the preceding oob data, so e.g., the index into the second
+	 * "sub-page" is 0x108, and the full device address of the start of mtd
+	 * page 0x201 is 0x00800108.
+	 */
+	int g4_page = page / 4;	                      /* device's 2K page */
+	int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
+	return (g4_page << 16) | g4_index;	      /* pack */
+}
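As a quick check on the packing above, here is a minimal stand-alone sketch (host-side, not driver code; the helper name is made up) that reproduces the worked example from the comment: mtd page 0x201, column 0 packs to device address 0x00800108.

#include <assert.h>
#include <stdint.h>

/* Mirrors mtd_to_docg4_address(): 2K page number in the upper half-word,
 * half-word index (0x108 per 512-byte sub-page, oob included) in the lower. */
static uint32_t pack_g4_addr(int page, int column)
{
	int g4_page = page / 4;                        /* device's 2K page */
	int g4_index = (page % 4) * 0x108 + column / 2;
	return ((uint32_t)g4_page << 16) | g4_index;
}

int main(void)
{
	/* mtd page 0x201 is sub-page 1 of 2K page 0x80 */
	assert(pack_g4_addr(0x201, 0) == 0x00800108);
	return 0;
}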
+
+static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
+			  int page_addr)
+{
+	/* handle standard nand commands */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
+
+	dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
+	      __func__, command, page_addr, column);
+
+	/*
+	 * Save the command and its arguments.  This enables emulation of
+	 * standard flash devices, and also some optimizations.
+	 */
+	doc->last_command.command = command;
+	doc->last_command.column = column;
+	doc->last_command.page = page_addr;
+
+	switch (command) {
+
+	case NAND_CMD_RESET:
+		reset(mtd);
+		break;
+
+	case NAND_CMD_READ0:
+		read_page_prologue(mtd, g4_addr);
+		break;
+
+	case NAND_CMD_STATUS:
+		/* next call to read_byte() will expect a status */
+		break;
+
+	case NAND_CMD_SEQIN:
+		write_page_prologue(mtd, g4_addr);
+
+		/* hack for deferred write of oob bytes */
+		if (doc->oob_page == page_addr)
+			memcpy(nand->oob_poi, doc->oob_buf, 16);
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		pageprog(mtd);
+		break;
+
+	/* we don't expect these, based on review of nand_base.c */
+	case NAND_CMD_READOOB:
+	case NAND_CMD_READID:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+		dev_warn(doc->dev, "docg4_command: "
+			 "unexpected nand command 0x%x\n", command);
+		break;
+
+	}
+}
+
+static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
+		     uint8_t *buf, int page, bool use_ecc)
+{
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+	uint16_t status, edc_err, *buf16;
+
+	dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+
+	writew(DOC_ECCCONF0_READ_MODE |
+	       DOC_ECCCONF0_ECC_ENABLE |
+	       DOC_ECCCONF0_UNKNOWN |
+	       DOCG4_BCH_SIZE,
+	       docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* the 1st byte from the I/O reg is a status; the rest is page data */
+	status = readw(docptr + DOC_IOSPACE_DATA);
+	if (status & DOCG4_READ_ERROR) {
+		dev_err(doc->dev,
+			"docg4_read_page: bad status: 0x%02x\n", status);
+		writew(0, docptr + DOC_DATAEND);
+		return -EIO;
+	}
+
+	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+	docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
+
+	/*
+	 * Diskonchips read oob immediately after a page read.  Mtd
+	 * infrastructure issues a separate command for reading oob after the
+	 * page is read.  So we save the oob bytes in a local buffer and just
+	 * copy them if the next command reads oob from the same page.
+	 */
+
+	/* first 14 oob bytes read from I/O reg */
+	docg4_read_buf(mtd, doc->oob_buf, 14);
+
+	/* last 2 read from another reg */
+	buf16 = (uint16_t *)(doc->oob_buf + 14);
+	*buf16 = readw(docptr + DOCG4_MYSTERY_REG);
+
+	write_nop(docptr);
+
+	if (likely(use_ecc)) {
+
+		/* read the register that tells us if bitflip(s) detected  */
+		edc_err = readw(docptr + DOC_ECCCONF1);
+		edc_err = readw(docptr + DOC_ECCCONF1);
+		dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
+
+		/* If bitflips are reported, attempt to correct with ecc */
+		if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
+			int bits_corrected = correct_data(mtd, buf, page);
+			if (bits_corrected == -EBADMSG)
+				mtd->ecc_stats.failed++;
+			else
+				mtd->ecc_stats.corrected += bits_corrected;
+		}
+	}
+
+	writew(0, docptr + DOC_DATAEND);
+	return 0;
+}
+
+
+static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+			       uint8_t *buf, int page)
+{
+	return read_page(mtd, nand, buf, page, false);
+}
+
+static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
+			   uint8_t *buf, int page)
+{
+	return read_page(mtd, nand, buf, page, true);
+}
+
+static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
+			  int page, int sndcmd)
+{
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+	uint16_t status;
+
+	dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
+
+	/*
+	 * Oob bytes are read as part of a normal page read.  If the previous
+	 * nand command was a read of the page whose oob is now being read, just
+	 * copy the oob bytes that we saved in a local buffer and avoid a
+	 * separate oob read.
+	 */
+	if (doc->last_command.command == NAND_CMD_READ0 &&
+	    doc->last_command.page == page) {
+		memcpy(nand->oob_poi, doc->oob_buf, 16);
+		return 0;
+	}
+
+	/*
+	 * Separate read of oob data only.
+	 */
+	docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+
+	writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* the 1st byte from the I/O reg is a status; the rest is oob data */
+	status = readw(docptr + DOC_IOSPACE_DATA);
+	if (status & DOCG4_READ_ERROR) {
+		dev_warn(doc->dev,
+			 "docg4_read_oob failed: status = 0x%02x\n", status);
+		return -EIO;
+	}
+
+	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+	docg4_read_buf(mtd, nand->oob_poi, 16);
+
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+
+	return 0;
+}
+
+static void docg4_erase_block(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+	uint16_t g4_page;
+
+	dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
+
+	sequence_reset(mtd);
+
+	writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
+	writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+
+	/* only 2 bytes of address are written to specify erase block */
+	g4_page = (uint16_t)(page / 4);  /* to g4's 2k page addressing */
+	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+	g4_page >>= 8;
+	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+	write_nop(docptr);
+
+	/* start the erasure */
+	writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	usleep_range(500, 1000); /* erasure is long; take a snooze */
+	poll_status(doc);
+	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	read_progstatus(doc);
+
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+	poll_status(doc);
+	write_nop(docptr);
+}
+
+static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
+		       const uint8_t *buf, bool use_ecc)
+{
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+	uint8_t ecc_buf[8];
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	writew(DOC_ECCCONF0_ECC_ENABLE |
+	       DOC_ECCCONF0_UNKNOWN |
+	       DOCG4_BCH_SIZE,
+	       docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+
+	/* write the page data */
+	docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
+
+	/* oob bytes 0 through 5 are written to I/O reg */
+	docg4_write_buf16(mtd, nand->oob_poi, 6);
+
+	/* oob byte 6 written to a separate reg */
+	writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
+
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* write hw-generated ecc bytes to oob */
+	if (likely(use_ecc)) {
+		/* oob byte 7 is hamming code */
+		uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
+		hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
+		writew(hamming, docptr + DOCG4_OOB_6_7);
+		write_nop(docptr);
+
+		/* read the 7 bch bytes from ecc regs */
+		read_hw_ecc(docptr, ecc_buf);
+		ecc_buf[7] = 0;         /* clear the "page written" flag */
+	}
+
+	/* write user-supplied bytes to oob */
+	else {
+		writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
+		write_nop(docptr);
+		memcpy(ecc_buf, &nand->oob_poi[8], 8);
+	}
+
+	docg4_write_buf16(mtd, ecc_buf, 8);
+	write_nop(docptr);
+	write_nop(docptr);
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+}
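To make the resulting spare-area layout easier to follow, here is a host-side sketch of the 16-byte oob image that write_page() produces in ecc mode. Only the byte ordering is taken from the code above; the function and parameter names are illustrative, and the hamming/BCH values would normally come from the hardware registers.

#include <stdint.h>
#include <string.h>

/* Assemble the 16 oob bytes in the order write_page() emits them (ecc mode). */
static void build_oob_image(uint8_t out[16], const uint8_t *oob_poi,
			    uint8_t hamming, const uint8_t bch[7])
{
	memcpy(out, oob_poi, 7);     /* bytes 0..6: caller-supplied oob */
	out[7] = hamming;            /* byte 7: hw hamming parity */
	memcpy(&out[8], bch, 7);     /* bytes 8..14: hw BCH ecc */
	out[15] = 0;                 /* byte 15: "page written" flag */
}

In raw mode the same slots 7..15 are instead taken verbatim from oob_poi[7..15], as the else-branch above shows.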
+
+static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+				 const uint8_t *buf)
+{
+	return write_page(mtd, nand, buf, false);
+}
+
+static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+			     const uint8_t *buf)
+{
+	return write_page(mtd, nand, buf, true);
+}
+
+static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
+			   int page)
+{
+	/*
+	 * Writing oob-only is not really supported, because MLC nand must write
+	 * oob bytes at the same time as page data.  Nonetheless, we save the
+	 * oob buffer contents here, and then write it along with the page data
+	 * if the same page is subsequently written.  This allows user space
+	 * utilities that write the oob data prior to the page data to work
+	 * (e.g., nandwrite).  The disadvantage is that, if the intention was to
+	 * write oob only, the operation is quietly ignored.  Also, oob can get
+	 * corrupted if two concurrent processes are running nandwrite.
+	 */
+
+	/* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
+	struct docg4_priv *doc = nand->priv;
+	doc->oob_page = page;
+	memcpy(doc->oob_buf, nand->oob_poi, 16);
+	return 0;
+}
+
+static int __init read_factory_bbt(struct mtd_info *mtd)
+{
+	/*
+	 * The device contains a read-only factory bad block table.  Read it and
+	 * update the memory-based bbt accordingly.
+	 */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
+	uint8_t *buf;
+	int i, block, status;
+
+	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	read_page_prologue(mtd, g4_addr);
+	status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+	if (status)
+		goto exit;
+
+	/*
+	 * If no memory-based bbt was created, exit.  This will happen if module
+	 * parameter ignore_badblocks is set.  Then why even call this function?
+	 * For an unknown reason, block erase always fails if it's the first
+	 * operation after device power-up.  The above read ensures it never is.
+	 * Ugly, I know.
+	 */
+	if (nand->bbt == NULL)  /* no memory-based bbt */
+		goto exit;
+
+	/*
+	 * Parse factory bbt and update memory-based bbt.  Factory bbt format is
+	 * simple: one bit per block, block numbers increase left to right (msb
+	 * to lsb).  Bit clear means bad block.
+	 */
+	for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
+		int bitnum;
+		unsigned long bits = ~buf[i];
+		for_each_set_bit(bitnum, &bits, 8) {
+			int badblock = block + 7 - bitnum;
+			nand->bbt[badblock / 4] |=
+				0x03 << ((badblock % 4) * 2);
+			mtd->ecc_stats.badblocks++;
+			dev_notice(doc->dev, "factory-marked bad block: %d\n",
+				   badblock);
+		}
+	}
+ exit:
+	kfree(buf);
+	return status;
+}
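The bit-to-block mapping used in the loop above can be shown in isolation. This is a stand-alone sketch (hypothetical helper, no kernel dependencies): a clear bit marks a bad block, and the msb of each byte corresponds to the lowest-numbered block it covers.

#include <stdint.h>
#include <stdio.h>

/* Decode one byte of the factory bbt covering blocks 'base'..'base+7'. */
static void decode_factory_bbt_byte(uint8_t byte, int base)
{
	int bitnum;

	for (bitnum = 0; bitnum < 8; bitnum++)
		if (!(byte & (1 << bitnum)))            /* clear bit = bad */
			printf("bad block %d\n", base + 7 - bitnum);
}

int main(void)
{
	decode_factory_bbt_byte(0x7f, 0);   /* msb clear -> block 0 bad  */
	decode_factory_bbt_byte(0xfb, 8);   /* bit 2 clear -> block 13 bad */
	return 0;
}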
+
+static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	/*
+	 * Mark a block as bad.  Bad blocks are marked in the oob area of the
+	 * first page of the block.  The default scan_bbt() in the nand
+	 * infrastructure code works fine for building the memory-based bbt
+	 * during initialization, as does the nand infrastructure function that
+	 * checks if a block is bad by reading the bbt.  This function replaces
+	 * the nand default because writes to oob-only are not supported.
+	 */
+
+	int ret, i;
+	uint8_t *buf;
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	struct nand_bbt_descr *bbtd = nand->badblock_pattern;
+	int block = (int)(ofs >> nand->bbt_erase_shift);
+	int page = (int)(ofs >> nand->page_shift);
+	uint32_t g4_addr = mtd_to_docg4_address(page, 0);
+
+	dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
+
+	if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
+		dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
+			 __func__, ofs);
+
+	/* allocate blank buffer for page data */
+	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	/* update bbt in memory */
+	nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);
+
+	/* write bit-wise negation of pattern to oob buffer */
+	memset(nand->oob_poi, 0xff, mtd->oobsize);
+	for (i = 0; i < bbtd->len; i++)
+		nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
+
+	/* write first page of block */
+	write_page_prologue(mtd, g4_addr);
+	docg4_write_page(mtd, nand, buf);
+	ret = pageprog(mtd);
+	if (!ret)
+		mtd->ecc_stats.badblocks++;
+
+	kfree(buf);
+
+	return ret;
+}
+
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+	/* only called when module_param ignore_badblocks is set */
+	return 0;
+}
+
+static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	/*
+	 * Put the device into "deep power-down" mode.  Note that CE# must be
+	 * deasserted for this to take effect.  The xscale, e.g., can be
+	 * configured to float this signal when the processor enters power-down,
+	 * and a suitable pull-up ensures its deassertion.
+	 */
+
+	int i;
+	uint8_t pwr_down;
+	struct docg4_priv *doc = platform_get_drvdata(pdev);
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	/* poll the register that tells us we're ready to go to sleep */
+	for (i = 0; i < 10; i++) {
+		pwr_down = readb(docptr + DOC_POWERMODE);
+		if (pwr_down & DOC_POWERDOWN_READY)
+			break;
+		usleep_range(1000, 4000);
+	}
+
+	if (!(pwr_down & DOC_POWERDOWN_READY)) {
+		dev_err(doc->dev, "suspend failed; "
+			"timeout polling DOC_POWERDOWN_READY\n");
+		return -EIO;
+	}
+
+	writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
+	       docptr + DOC_ASICMODE);
+	writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
+	       docptr + DOC_ASICMODECONFIRM);
+
+	write_nop(docptr);
+
+	return 0;
+}
+
+static int docg4_resume(struct platform_device *pdev)
+{
+
+	/*
+	 * Exit power-down.  Twelve consecutive reads of the address below
+	 * accomplish this, assuming CE# has been asserted.
+	 */
+
+	struct docg4_priv *doc = platform_get_drvdata(pdev);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	for (i = 0; i < 12; i++)
+		readb(docptr + 0x1fff);
+
+	return 0;
+}
+
+static void __init init_mtd_structs(struct mtd_info *mtd)
+{
+	/* initialize mtd and nand data structures */
+
+	/*
+	 * Note that some of the following initializations are not usually
+	 * required within a nand driver because they are performed by the nand
+	 * infrastructure code as part of nand_scan().  In this case they need
+	 * to be initialized here because we skip call to nand_scan_ident() (the
+	 * first half of nand_scan()).  The call to nand_scan_ident() is skipped
+	 * because for this device the chip id is not read in the manner of a
+	 * standard nand device.  Unfortunately, nand_scan_ident() does other
+	 * things as well, such as call nand_set_defaults().
+	 */
+
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+
+	mtd->size = DOCG4_CHIP_SIZE;
+	mtd->name = "Msys_Diskonchip_G4";
+	mtd->writesize = DOCG4_PAGE_SIZE;
+	mtd->erasesize = DOCG4_BLOCK_SIZE;
+	mtd->oobsize = DOCG4_OOB_SIZE;
+	nand->chipsize = DOCG4_CHIP_SIZE;
+	nand->chip_shift = DOCG4_CHIP_SHIFT;
+	nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
+	nand->chip_delay = 20;
+	nand->page_shift = DOCG4_PAGE_SHIFT;
+	nand->pagemask = 0x3ffff;
+	nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
+	nand->badblockbits = 8;
+	nand->ecc.layout = &docg4_oobinfo;
+	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+	nand->ecc.size = DOCG4_PAGE_SIZE;
+	nand->ecc.prepad = 8;
+	nand->ecc.bytes	= 8;
+	nand->ecc.strength = DOCG4_T;
+	nand->options =
+		NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+	nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
+	nand->controller = &nand->hwcontrol;
+	spin_lock_init(&nand->controller->lock);
+	init_waitqueue_head(&nand->controller->wq);
+
+	/* methods */
+	nand->cmdfunc = docg4_command;
+	nand->waitfunc = docg4_wait;
+	nand->select_chip = docg4_select_chip;
+	nand->read_byte = docg4_read_byte;
+	nand->block_markbad = docg4_block_markbad;
+	nand->read_buf = docg4_read_buf;
+	nand->write_buf = docg4_write_buf16;
+	nand->scan_bbt = nand_default_bbt;
+	nand->erase_cmd = docg4_erase_block;
+	nand->ecc.read_page = docg4_read_page;
+	nand->ecc.write_page = docg4_write_page;
+	nand->ecc.read_page_raw = docg4_read_page_raw;
+	nand->ecc.write_page_raw = docg4_write_page_raw;
+	nand->ecc.read_oob = docg4_read_oob;
+	nand->ecc.write_oob = docg4_write_oob;
+
+	/*
+	 * The way the nand infrastructure code is written, a memory-based bbt
+	 * is not created if NAND_SKIP_BBTSCAN is set.  With no memory bbt,
+	 * nand->block_bad() is used.  So when ignoring bad blocks, we skip the
+	 * scan and define a dummy block_bad() which always returns 0.
+	 */
+	if (ignore_badblocks) {
+		nand->options |= NAND_SKIP_BBTSCAN;
+		nand->block_bad	= docg4_block_neverbad;
+	}
+
+}
+
+static int __init read_id_reg(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd->priv;
+	struct docg4_priv *doc = nand->priv;
+	void __iomem *docptr = doc->virtadr;
+	uint16_t id1, id2;
+
+	/* check for presence of g4 chip by reading id registers */
+	id1 = readw(docptr + DOC_CHIPID);
+	id1 = readw(docptr + DOCG4_MYSTERY_REG);
+	id2 = readw(docptr + DOC_CHIPID_INV);
+	id2 = readw(docptr + DOCG4_MYSTERY_REG);
+
+	if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
+		dev_info(doc->dev,
+			 "NAND device: 128MiB Diskonchip G4 detected\n");
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int __init probe_docg4(struct platform_device *pdev)
+{
+	struct mtd_info *mtd;
+	struct nand_chip *nand;
+	void __iomem *virtadr;
+	struct docg4_priv *doc;
+	int len, retval;
+	struct resource *r;
+	struct device *dev = &pdev->dev;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		dev_err(dev, "no io memory resource defined!\n");
+		return -ENODEV;
+	}
+
+	virtadr = ioremap(r->start, resource_size(r));
+	if (!virtadr) {
+		dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
+		return -EIO;
+	}
+
+	len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
+		sizeof(struct docg4_priv);
+	mtd = kzalloc(len, GFP_KERNEL);
+	if (mtd == NULL) {
+		retval = -ENOMEM;
+		goto fail;
+	}
+	nand = (struct nand_chip *) (mtd + 1);
+	doc = (struct docg4_priv *) (nand + 1);
+	mtd->priv = nand;
+	nand->priv = doc;
+	mtd->owner = THIS_MODULE;
+	doc->virtadr = virtadr;
+	doc->dev = dev;
+
+	init_mtd_structs(mtd);
+
+	/* initialize kernel bch algorithm */
+	doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+	if (doc->bch == NULL) {
+		retval = -EINVAL;
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, doc);
+
+	reset(mtd);
+	retval = read_id_reg(mtd);
+	if (retval == -ENODEV) {
+		dev_warn(dev, "No diskonchip G4 device found.\n");
+		goto fail;
+	}
+
+	retval = nand_scan_tail(mtd);
+	if (retval)
+		goto fail;
+
+	retval = read_factory_bbt(mtd);
+	if (retval)
+		goto fail;
+
+	retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+	if (retval)
+		goto fail;
+
+	doc->mtd = mtd;
+	return 0;
+
+ fail:
+	iounmap(virtadr);
+	if (mtd) {
+		/* re-declarations avoid compiler warning */
+		struct nand_chip *nand = mtd->priv;
+		struct docg4_priv *doc = nand->priv;
+		nand_release(mtd); /* deletes partitions and mtd devices */
+		platform_set_drvdata(pdev, NULL);
+		free_bch(doc->bch);
+		kfree(mtd);
+	}
+
+	return retval;
+}
+
+static int __exit cleanup_docg4(struct platform_device *pdev)
+{
+	struct docg4_priv *doc = platform_get_drvdata(pdev);
+	nand_release(doc->mtd);
+	platform_set_drvdata(pdev, NULL);
+	free_bch(doc->bch);
+	kfree(doc->mtd);
+	iounmap(doc->virtadr);
+	return 0;
+}
+
+static struct platform_driver docg4_driver = {
+	.driver		= {
+		.name	= "docg4",
+		.owner	= THIS_MODULE,
+	},
+	.suspend	= docg4_suspend,
+	.resume		= docg4_resume,
+	.remove		= __exit_p(cleanup_docg4),
+};
+
+static int __init docg4_init(void)
+{
+	return platform_driver_probe(&docg4_driver, probe_docg4);
+}
+
+static void __exit docg4_exit(void)
+{
+	platform_driver_unregister(&docg4_driver);
+}
+
+module_init(docg4_init);
+module_exit(docg4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Dunn");
+MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 7195ee6..80b5264 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -813,6 +813,12 @@
 				&fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
 		chip->ecc.size = 512;
 		chip->ecc.bytes = 3;
+		chip->ecc.strength = 1;
+		/*
+		 * FIXME: can hardware ecc correct 4 bitflips if page size is
+		 * 2k?  Then does hardware report number of corrections for this
+		 * case?  If so, ecc_stats reporting needs to be fixed as well.
+		 */
 	} else {
 		/* otherwise fall back to default software ECC */
 		chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e53b760..1b8330e 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -17,6 +17,10 @@
  */
 
 #include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -27,6 +31,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
 #include <linux/slab.h>
@@ -34,7 +39,7 @@
 #include <linux/amba/bus.h>
 #include <mtd/mtd-abi.h>
 
-static struct nand_ecclayout fsmc_ecc1_layout = {
+static struct nand_ecclayout fsmc_ecc1_128_layout = {
 	.eccbytes = 24,
 	.eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
 		66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
@@ -50,7 +55,127 @@
 	}
 };
 
-static struct nand_ecclayout fsmc_ecc4_lp_layout = {
+static struct nand_ecclayout fsmc_ecc1_64_layout = {
+	.eccbytes = 12,
+	.eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
+	.oobfree = {
+		{.offset = 8, .length = 8},
+		{.offset = 24, .length = 8},
+		{.offset = 40, .length = 8},
+		{.offset = 56, .length = 8},
+	}
+};
+
+static struct nand_ecclayout fsmc_ecc1_16_layout = {
+	.eccbytes = 3,
+	.eccpos = {2, 3, 4},
+	.oobfree = {
+		{.offset = 8, .length = 8},
+	}
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_256_layout = {
+	.eccbytes = 208,
+	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
+		9,  10,  11,  12,  13,  14,
+		18,  19,  20,  21,  22,  23,  24,
+		25,  26,  27,  28,  29,  30,
+		34,  35,  36,  37,  38,  39,  40,
+		41,  42,  43,  44,  45,  46,
+		50,  51,  52,  53,  54,  55,  56,
+		57,  58,  59,  60,  61,  62,
+		66,  67,  68,  69,  70,  71,  72,
+		73,  74,  75,  76,  77,  78,
+		82,  83,  84,  85,  86,  87,  88,
+		89,  90,  91,  92,  93,  94,
+		98,  99, 100, 101, 102, 103, 104,
+		105, 106, 107, 108, 109, 110,
+		114, 115, 116, 117, 118, 119, 120,
+		121, 122, 123, 124, 125, 126,
+		130, 131, 132, 133, 134, 135, 136,
+		137, 138, 139, 140, 141, 142,
+		146, 147, 148, 149, 150, 151, 152,
+		153, 154, 155, 156, 157, 158,
+		162, 163, 164, 165, 166, 167, 168,
+		169, 170, 171, 172, 173, 174,
+		178, 179, 180, 181, 182, 183, 184,
+		185, 186, 187, 188, 189, 190,
+		194, 195, 196, 197, 198, 199, 200,
+		201, 202, 203, 204, 205, 206,
+		210, 211, 212, 213, 214, 215, 216,
+		217, 218, 219, 220, 221, 222,
+		226, 227, 228, 229, 230, 231, 232,
+		233, 234, 235, 236, 237, 238,
+		242, 243, 244, 245, 246, 247, 248,
+		249, 250, 251, 252, 253, 254
+	},
+	.oobfree = {
+		{.offset = 15, .length = 3},
+		{.offset = 31, .length = 3},
+		{.offset = 47, .length = 3},
+		{.offset = 63, .length = 3},
+		{.offset = 79, .length = 3},
+		{.offset = 95, .length = 3},
+		{.offset = 111, .length = 3},
+		{.offset = 127, .length = 3},
+		{.offset = 143, .length = 3},
+		{.offset = 159, .length = 3},
+		{.offset = 175, .length = 3},
+		{.offset = 191, .length = 3},
+		{.offset = 207, .length = 3},
+		{.offset = 223, .length = 3},
+		{.offset = 239, .length = 3},
+		{.offset = 255, .length = 1}
+	}
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_224_layout = {
+	.eccbytes = 104,
+	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
+		9,  10,  11,  12,  13,  14,
+		18,  19,  20,  21,  22,  23,  24,
+		25,  26,  27,  28,  29,  30,
+		34,  35,  36,  37,  38,  39,  40,
+		41,  42,  43,  44,  45,  46,
+		50,  51,  52,  53,  54,  55,  56,
+		57,  58,  59,  60,  61,  62,
+		66,  67,  68,  69,  70,  71,  72,
+		73,  74,  75,  76,  77,  78,
+		82,  83,  84,  85,  86,  87,  88,
+		89,  90,  91,  92,  93,  94,
+		98,  99, 100, 101, 102, 103, 104,
+		105, 106, 107, 108, 109, 110,
+		114, 115, 116, 117, 118, 119, 120,
+		121, 122, 123, 124, 125, 126
+	},
+	.oobfree = {
+		{.offset = 15, .length = 3},
+		{.offset = 31, .length = 3},
+		{.offset = 47, .length = 3},
+		{.offset = 63, .length = 3},
+		{.offset = 79, .length = 3},
+		{.offset = 95, .length = 3},
+		{.offset = 111, .length = 3},
+		{.offset = 127, .length = 97}
+	}
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
+ * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_128_layout = {
 	.eccbytes = 104,
 	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
 		9,  10,  11,  12,  13,  14,
@@ -82,6 +207,45 @@
 };
 
 /*
+ * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
+ * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
+ * bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_64_layout = {
+	.eccbytes = 52,
+	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
+		9,  10,  11,  12,  13,  14,
+		18,  19,  20,  21,  22,  23,  24,
+		25,  26,  27,  28,  29,  30,
+		34,  35,  36,  37,  38,  39,  40,
+		41,  42,  43,  44,  45,  46,
+		50,  51,  52,  53,  54,  55,  56,
+		57,  58,  59,  60,  61,  62,
+	},
+	.oobfree = {
+		{.offset = 15, .length = 3},
+		{.offset = 31, .length = 3},
+		{.offset = 47, .length = 3},
+		{.offset = 63, .length = 1},
+	}
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
+ * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and one
+ * byte is free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_16_layout = {
+	.eccbytes = 13,
+	.eccpos = { 0,  1,  2,  3,  6,  7, 8,
+		9, 10, 11, 12, 13, 14
+	},
+	.oobfree = {
+		{.offset = 15, .length = 1},
+	}
+};
+
+/*
  * ECC placement definitions in oobfree type format.
  * There are 13 bytes of ecc for every 512 byte block and it has to be read
  * consecutively and immediately after the 512 byte data block for hardware to
@@ -103,16 +267,6 @@
 	}
 };
 
-static struct nand_ecclayout fsmc_ecc4_sp_layout = {
-	.eccbytes = 13,
-	.eccpos = { 0,  1,  2,  3,  6,  7, 8,
-		9, 10, 11, 12, 13, 14
-	},
-	.oobfree = {
-		{.offset = 15, .length = 1},
-	}
-};
-
 static struct fsmc_eccplace fsmc_ecc4_sp_place = {
 	.eccplace = {
 		{.offset = 0, .length = 4},
@@ -120,75 +274,24 @@
 	}
 };
 
-/*
- * Default partition tables to be used if the partition information not
- * provided through platform data.
- *
- * Default partition layout for small page(= 512 bytes) devices
- * Size for "Root file system" is updated in driver based on actual device size
- */
-static struct mtd_partition partition_info_16KB_blk[] = {
-	{
-		.name = "X-loader",
-		.offset = 0,
-		.size = 4*0x4000,
-	},
-	{
-		.name = "U-Boot",
-		.offset = 0x10000,
-		.size = 20*0x4000,
-	},
-	{
-		.name = "Kernel",
-		.offset = 0x60000,
-		.size = 256*0x4000,
-	},
-	{
-		.name = "Root File System",
-		.offset = 0x460000,
-		.size = MTDPART_SIZ_FULL,
-	},
-};
-
-/*
- * Default partition layout for large page(> 512 bytes) devices
- * Size for "Root file system" is updated in driver based on actual device size
- */
-static struct mtd_partition partition_info_128KB_blk[] = {
-	{
-		.name = "X-loader",
-		.offset = 0,
-		.size = 4*0x20000,
-	},
-	{
-		.name = "U-Boot",
-		.offset = 0x80000,
-		.size = 12*0x20000,
-	},
-	{
-		.name = "Kernel",
-		.offset = 0x200000,
-		.size = 48*0x20000,
-	},
-	{
-		.name = "Root File System",
-		.offset = 0x800000,
-		.size = MTDPART_SIZ_FULL,
-	},
-};
-
-
 /**
  * struct fsmc_nand_data - structure for FSMC NAND device state
  *
  * @pid:		Part ID on the AMBA PrimeCell format
  * @mtd:		MTD info for a NAND flash.
  * @nand:		Chip related info for a NAND flash.
+ * @partitions:		Partition info for a NAND Flash.
+ * @nr_partitions:	Total number of partition of a NAND flash.
  *
  * @ecc_place:		ECC placing locations in oobfree type format.
  * @bank:		Bank number for probed device.
  * @clk:		Clock structure for FSMC.
  *
+ * @read_dma_chan:	DMA channel for read access
+ * @write_dma_chan:	DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @data_pa:		NAND Physical port for Data.
  * @data_va:		NAND port for Data.
  * @cmd_va:		NAND port for Command.
  * @addr_va:		NAND port for Address.
@@ -198,16 +301,23 @@
 	u32			pid;
 	struct mtd_info		mtd;
 	struct nand_chip	nand;
+	struct mtd_partition	*partitions;
+	unsigned int		nr_partitions;
 
 	struct fsmc_eccplace	*ecc_place;
 	unsigned int		bank;
+	struct device		*dev;
+	enum access_mode	mode;
 	struct clk		*clk;
 
-	struct resource		*resregs;
-	struct resource		*rescmd;
-	struct resource		*resaddr;
-	struct resource		*resdata;
+	/* DMA related objects */
+	struct dma_chan		*read_dma_chan;
+	struct dma_chan		*write_dma_chan;
+	struct completion	dma_access_complete;
 
+	struct fsmc_nand_timings *dev_timings;
+
+	dma_addr_t		data_pa;
 	void __iomem		*data_va;
 	void __iomem		*cmd_va;
 	void __iomem		*addr_va;
@@ -251,28 +361,29 @@
 	struct nand_chip *this = mtd->priv;
 	struct fsmc_nand_data *host = container_of(mtd,
 					struct fsmc_nand_data, mtd);
-	struct fsmc_regs *regs = host->regs_va;
+	void __iomem *regs = host->regs_va;
 	unsigned int bank = host->bank;
 
 	if (ctrl & NAND_CTRL_CHANGE) {
+		u32 pc;
+
 		if (ctrl & NAND_CLE) {
-			this->IO_ADDR_R = (void __iomem *)host->cmd_va;
-			this->IO_ADDR_W = (void __iomem *)host->cmd_va;
+			this->IO_ADDR_R = host->cmd_va;
+			this->IO_ADDR_W = host->cmd_va;
 		} else if (ctrl & NAND_ALE) {
-			this->IO_ADDR_R = (void __iomem *)host->addr_va;
-			this->IO_ADDR_W = (void __iomem *)host->addr_va;
+			this->IO_ADDR_R = host->addr_va;
+			this->IO_ADDR_W = host->addr_va;
 		} else {
-			this->IO_ADDR_R = (void __iomem *)host->data_va;
-			this->IO_ADDR_W = (void __iomem *)host->data_va;
+			this->IO_ADDR_R = host->data_va;
+			this->IO_ADDR_W = host->data_va;
 		}
 
-		if (ctrl & NAND_NCE) {
-			writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE,
-					&regs->bank_regs[bank].pc);
-		} else {
-			writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE,
-				       &regs->bank_regs[bank].pc);
-		}
+		pc = readl(FSMC_NAND_REG(regs, bank, PC));
+		if (ctrl & NAND_NCE)
+			pc |= FSMC_ENABLE;
+		else
+			pc &= ~FSMC_ENABLE;
+		writel(pc, FSMC_NAND_REG(regs, bank, PC));
 	}
 
 	mb();
@@ -287,22 +398,42 @@
  * This routine initializes timing parameters related to NAND memory access in
  * FSMC registers
  */
-static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank,
-				   uint32_t busw)
+static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
+			   uint32_t busw, struct fsmc_nand_timings *timings)
 {
 	uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+	uint32_t tclr, tar, thiz, thold, twait, tset;
+	struct fsmc_nand_timings *tims;
+	struct fsmc_nand_timings default_timings = {
+		.tclr	= FSMC_TCLR_1,
+		.tar	= FSMC_TAR_1,
+		.thiz	= FSMC_THIZ_1,
+		.thold	= FSMC_THOLD_4,
+		.twait	= FSMC_TWAIT_6,
+		.tset	= FSMC_TSET_0,
+	};
+
+	if (timings)
+		tims = timings;
+	else
+		tims = &default_timings;
+
+	tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+	tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+	thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+	thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+	twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+	tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
 
 	if (busw)
-		writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc);
+		writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
 	else
-		writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc);
+		writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
 
-	writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1,
-	       &regs->bank_regs[bank].pc);
-	writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
-	       &regs->bank_regs[bank].comm);
-	writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
-	       &regs->bank_regs[bank].attrib);
+	writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+			FSMC_NAND_REG(regs, bank, PC));
+	writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
+	writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
 }
 
 /*
@@ -312,15 +443,15 @@
 {
 	struct fsmc_nand_data *host = container_of(mtd,
 					struct fsmc_nand_data, mtd);
-	struct fsmc_regs *regs = host->regs_va;
+	void __iomem *regs = host->regs_va;
 	uint32_t bank = host->bank;
 
-	writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256,
-		       &regs->bank_regs[bank].pc);
-	writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN,
-			&regs->bank_regs[bank].pc);
-	writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN,
-			&regs->bank_regs[bank].pc);
+	writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+			FSMC_NAND_REG(regs, bank, PC));
+	writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+			FSMC_NAND_REG(regs, bank, PC));
+	writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+			FSMC_NAND_REG(regs, bank, PC));
 }
 
 /*
@@ -333,37 +464,42 @@
 {
 	struct fsmc_nand_data *host = container_of(mtd,
 					struct fsmc_nand_data, mtd);
-	struct fsmc_regs *regs = host->regs_va;
+	void __iomem *regs = host->regs_va;
 	uint32_t bank = host->bank;
 	uint32_t ecc_tmp;
 	unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
 
 	do {
-		if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY)
+		if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
 			break;
 		else
 			cond_resched();
 	} while (!time_after_eq(jiffies, deadline));
 
-	ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+	if (time_after_eq(jiffies, deadline)) {
+		dev_err(host->dev, "calculate ecc timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
 	ecc[0] = (uint8_t) (ecc_tmp >> 0);
 	ecc[1] = (uint8_t) (ecc_tmp >> 8);
 	ecc[2] = (uint8_t) (ecc_tmp >> 16);
 	ecc[3] = (uint8_t) (ecc_tmp >> 24);
 
-	ecc_tmp = readl(&regs->bank_regs[bank].ecc2);
+	ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
 	ecc[4] = (uint8_t) (ecc_tmp >> 0);
 	ecc[5] = (uint8_t) (ecc_tmp >> 8);
 	ecc[6] = (uint8_t) (ecc_tmp >> 16);
 	ecc[7] = (uint8_t) (ecc_tmp >> 24);
 
-	ecc_tmp = readl(&regs->bank_regs[bank].ecc3);
+	ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
 	ecc[8] = (uint8_t) (ecc_tmp >> 0);
 	ecc[9] = (uint8_t) (ecc_tmp >> 8);
 	ecc[10] = (uint8_t) (ecc_tmp >> 16);
 	ecc[11] = (uint8_t) (ecc_tmp >> 24);
 
-	ecc_tmp = readl(&regs->bank_regs[bank].sts);
+	ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
 	ecc[12] = (uint8_t) (ecc_tmp >> 16);
 
 	return 0;
@@ -379,11 +515,11 @@
 {
 	struct fsmc_nand_data *host = container_of(mtd,
 					struct fsmc_nand_data, mtd);
-	struct fsmc_regs *regs = host->regs_va;
+	void __iomem *regs = host->regs_va;
 	uint32_t bank = host->bank;
 	uint32_t ecc_tmp;
 
-	ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
+	ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
 	ecc[0] = (uint8_t) (ecc_tmp >> 0);
 	ecc[1] = (uint8_t) (ecc_tmp >> 8);
 	ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -391,6 +527,166 @@
 	return 0;
 }
 
+/* Count the number of 0 bits in buff, up to a max of max_bits */
+static int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+	int k, written_bits = 0;
+
+	for (k = 0; k < size; k++) {
+		written_bits += hweight8(~buff[k]);
+		if (written_bits > max_bits)
+			break;
+	}
+
+	return written_bits;
+}
+
+static void dma_complete(void *param)
+{
+	struct fsmc_nand_data *host = param;
+
+	complete(&host->dma_access_complete);
+}
+
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+		enum dma_data_direction direction)
+{
+	struct dma_chan *chan;
+	struct dma_device *dma_dev;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dst, dma_src, dma_addr;
+	dma_cookie_t cookie;
+	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	int ret;
+
+	if (direction == DMA_TO_DEVICE)
+		chan = host->write_dma_chan;
+	else if (direction == DMA_FROM_DEVICE)
+		chan = host->read_dma_chan;
+	else
+		return -EINVAL;
+
+	dma_dev = chan->device;
+	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+	if (direction == DMA_TO_DEVICE) {
+		dma_src = dma_addr;
+		dma_dst = host->data_pa;
+		flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
+	} else {
+		dma_src = host->data_pa;
+		dma_dst = dma_addr;
+		flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
+	}
+
+	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+			len, flags);
+
+	if (!tx) {
+		dev_err(host->dev, "device_prep_dma_memcpy error\n");
+		dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+		return -EIO;
+	}
+
+	tx->callback = dma_complete;
+	tx->callback_param = host;
+	cookie = tx->tx_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(host->dev, "dma_submit_error %d\n", cookie);
+		return ret;
+	}
+
+	dma_async_issue_pending(chan);
+
+	ret =
+	wait_for_completion_interruptible_timeout(&host->dma_access_complete,
+				msecs_to_jiffies(3000));
+	if (ret <= 0) {
+		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+		dev_err(host->dev, "wait_for_completion_timeout\n");
+		return ret ? ret : -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @mtd:	MTD device structure
+ * @buf:	data buffer
+ * @len:	number of bytes to write
+ */
+static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd->priv;
+
+	if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+			IS_ALIGNED(len, sizeof(uint32_t))) {
+		uint32_t *p = (uint32_t *)buf;
+		len = len >> 2;
+		for (i = 0; i < len; i++)
+			writel(p[i], chip->IO_ADDR_W);
+	} else {
+		for (i = 0; i < len; i++)
+			writeb(buf[i], chip->IO_ADDR_W);
+	}
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @mtd:	MTD device structure
+ * @buf:	buffer to store data
+ * @len:	number of bytes to read
+ */
+static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd->priv;
+
+	if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+			IS_ALIGNED(len, sizeof(uint32_t))) {
+		uint32_t *p = (uint32_t *)buf;
+		len = len >> 2;
+		for (i = 0; i < len; i++)
+			p[i] = readl(chip->IO_ADDR_R);
+	} else {
+		for (i = 0; i < len; i++)
+			buf[i] = readb(chip->IO_ADDR_R);
+	}
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @mtd:	MTD device structure
+ * @buf:	buffer to store data
+ * @len:	number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct fsmc_nand_data *host;
+
+	host = container_of(mtd, struct fsmc_nand_data, mtd);
+	dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @mtd:	MTD device structure
+ * @buf:	data buffer
+ * @len:	number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
+		int len)
+{
+	struct fsmc_nand_data *host;
+
+	host = container_of(mtd, struct fsmc_nand_data, mtd);
+	dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
 /*
  * fsmc_read_page_hwecc
  * @mtd:	mtd info structure
@@ -426,7 +722,6 @@
 	uint8_t *oob = (uint8_t *)&ecc_oob[0];
 
 	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
-
 		chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
 		chip->read_buf(mtd, p, eccsize);
@@ -437,17 +732,19 @@
 			group++;
 
 			/*
-			* length is intentionally kept a higher multiple of 2
-			* to read at least 13 bytes even in case of 16 bit NAND
-			* devices
-			*/
-			len = roundup(len, 2);
+			 * length is intentionally kept a higher multiple of 2
+			 * to read at least 13 bytes even in case of 16 bit NAND
+			 * devices
+			 */
+			if (chip->options & NAND_BUSWIDTH_16)
+				len = roundup(len, 2);
+
 			chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
 			chip->read_buf(mtd, oob + j, len);
 			j += len;
 		}
 
-		memcpy(&ecc_code[i], oob, 13);
+		memcpy(&ecc_code[i], oob, chip->ecc.bytes);
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
@@ -461,7 +758,7 @@
 }
 
 /*
- * fsmc_correct_data
+ * fsmc_bch8_correct_data
  * @mtd:	mtd info structure
  * @dat:	buffer of read data
  * @read_ecc:	ecc read from device spare area
@@ -470,19 +767,51 @@
  * calc_ecc is a 104 bit information containing maximum of 8 error
  * offset informations of 13 bits each in 512 bytes of read data.
  */
-static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
 			     uint8_t *read_ecc, uint8_t *calc_ecc)
 {
 	struct fsmc_nand_data *host = container_of(mtd,
 					struct fsmc_nand_data, mtd);
-	struct fsmc_regs *regs = host->regs_va;
+	struct nand_chip *chip = mtd->priv;
+	void __iomem *regs = host->regs_va;
 	unsigned int bank = host->bank;
-	uint16_t err_idx[8];
-	uint64_t ecc_data[2];
+	uint32_t err_idx[8];
 	uint32_t num_err, i;
+	uint32_t ecc1, ecc2, ecc3, ecc4;
 
-	/* The calculated ecc is actually the correction index in data */
-	memcpy(ecc_data, calc_ecc, 13);
+	num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+
+	/* no bit flipping */
+	if (likely(num_err == 0))
+		return 0;
+
+	/* too many errors */
+	if (unlikely(num_err > 8)) {
+		/*
+		 * This is a temporary erase check. A newly erased page read
+		 * would result in an ecc error because the oob data is also
+		 * erased to FF and the calculated ecc for all-FF data is not
+		 * FF..FF.
+		 * This is a workaround to skip performing correction in case
+		 * data is FF..FF
+		 *
+		 * Logic:
+		 * For every page, each bit written as 0 is counted until the
+		 * number of such bits is greater than 8 (the maximum correction
+		 * capability of FSMC for each 512 + 13 bytes)
+		 */
+
+		int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+		int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+		if ((bits_ecc + bits_data) <= 8) {
+			if (bits_data)
+				memset(dat, 0xff, chip->ecc.size);
+			return bits_data;
+		}
+
+		return -EBADMSG;
+	}
 
 	/*
 	 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
@@ -493,27 +822,26 @@
 	 * uint64_t array and error offset indexes are populated in err_idx
 	 * array
 	 */
-	for (i = 0; i < 8; i++) {
-		if (i == 4) {
-			err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0];
-			ecc_data[1] >>= 1;
-			continue;
-		}
-		err_idx[i] = (ecc_data[i/4] & 0x1FFF);
-		ecc_data[i/4] >>= 13;
-	}
+	ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
+	ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
+	ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
+	ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
 
-	num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF;
-
-	if (num_err == 0xF)
-		return -EBADMSG;
+	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
 
 	i = 0;
 	while (num_err--) {
 		change_bit(0, (unsigned long *)&err_idx[i]);
 		change_bit(1, (unsigned long *)&err_idx[i]);
 
-		if (err_idx[i] <= 512 * 8) {
+		if (err_idx[i] < chip->ecc.size * 8) {
 			change_bit(err_idx[i], (unsigned long *)dat);
 			i++;
 		}
@@ -521,6 +849,44 @@
 	return i;
 }
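The erased-page workaround in fsmc_bch8_correct_data() reduces to a simple rule: if the 512 data bytes plus the 13 stored ecc bytes contain no more than 8 zero bits in total, the sector is taken to be erased (possibly with a few bitflips) rather than uncorrectable. A stand-alone sketch of that decision, with made-up helper names:

#include <stdbool.h>
#include <stdint.h>

/* popcount of zero bits in one byte */
static int zero_bits(uint8_t b)
{
	int n = 0;

	for (b = (uint8_t)~b; b; b &= b - 1)
		n++;
	return n;
}

/* Return true if a sector plus its stored ecc should be treated as erased:
 * no more than 8 bits were ever programmed to 0. */
static bool sector_looks_erased(const uint8_t *data, int data_len,
				const uint8_t *ecc, int ecc_len)
{
	int i, written = 0;

	for (i = 0; i < data_len && written <= 8; i++)
		written += zero_bits(data[i]);
	for (i = 0; i < ecc_len && written <= 8; i++)
		written += zero_bits(ecc[i]);

	return written <= 8;
}

The driver additionally rewrites the data buffer to 0xff and reports the stray zero bits in the data as corrected bitflips, which is what the memset()/return in that branch does.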
 
+static bool filter(struct dma_chan *chan, void *slave)
+{
+	chan->private = slave;
+	return true;
+}
+
+#ifdef CONFIG_OF
+static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+					       struct device_node *np)
+{
+	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	u32 val;
+
+	/* Set default NAND width to 8 bits */
+	pdata->width = 8;
+	if (!of_property_read_u32(np, "bank-width", &val)) {
+		if (val == 2) {
+			pdata->width = 16;
+		} else if (val != 1) {
+			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+			return -EINVAL;
+		}
+	}
+	of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
+	of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
+	if (of_get_property(np, "nand-skip-bbtscan", NULL))
+		pdata->options = NAND_SKIP_BBTSCAN;
+
+	return 0;
+}
+#else
+static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+					       struct device_node *np)
+{
+	return -ENOSYS;
+}
+#endif
+
 /*
  * fsmc_nand_probe - Probe function
  * @pdev:       platform device structure
@@ -528,102 +894,109 @@
 static int __init fsmc_nand_probe(struct platform_device *pdev)
 {
 	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device_node __maybe_unused *np = pdev->dev.of_node;
+	struct mtd_part_parser_data ppdata = {};
 	struct fsmc_nand_data *host;
 	struct mtd_info *mtd;
 	struct nand_chip *nand;
-	struct fsmc_regs *regs;
 	struct resource *res;
+	dma_cap_mask_t mask;
 	int ret = 0;
 	u32 pid;
 	int i;
 
+	if (np) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		pdev->dev.platform_data = pdata;
+		ret = fsmc_nand_probe_config_dt(pdev, np);
+		if (ret) {
+			dev_err(&pdev->dev, "no platform data\n");
+			return -ENODEV;
+		}
+	}
+
 	if (!pdata) {
 		dev_err(&pdev->dev, "platform data is NULL\n");
 		return -EINVAL;
 	}
 
 	/* Allocate memory for the device structure (and zero it) */
-	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 	if (!host) {
 		dev_err(&pdev->dev, "failed to allocate device structure\n");
 		return -ENOMEM;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
-	if (!res) {
-		ret = -EIO;
-		goto err_probe1;
+	if (!res)
+		return -EINVAL;
+
+	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+				pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory data resourse\n");
+		return -ENOENT;
 	}
 
-	host->resdata = request_mem_region(res->start, resource_size(res),
-			pdev->name);
-	if (!host->resdata) {
-		ret = -EIO;
-		goto err_probe1;
-	}
-
-	host->data_va = ioremap(res->start, resource_size(res));
+	host->data_pa = (dma_addr_t)res->start;
+	host->data_va = devm_ioremap(&pdev->dev, res->start,
+			resource_size(res));
 	if (!host->data_va) {
-		ret = -EIO;
-		goto err_probe1;
+		dev_err(&pdev->dev, "data ioremap failed\n");
+		return -ENOMEM;
 	}
 
-	host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE,
-			resource_size(res), pdev->name);
-	if (!host->resaddr) {
-		ret = -EIO;
-		goto err_probe1;
+	if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
+			resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory ale resourse\n");
+		return -ENOENT;
 	}
 
-	host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res));
+	host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
+			resource_size(res));
 	if (!host->addr_va) {
-		ret = -EIO;
-		goto err_probe1;
+		dev_err(&pdev->dev, "ale ioremap failed\n");
+		return -ENOMEM;
 	}
 
-	host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE,
-			resource_size(res), pdev->name);
-	if (!host->rescmd) {
-		ret = -EIO;
-		goto err_probe1;
+	if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
+			resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory cle resourse\n");
+		return -ENOENT;
 	}
 
-	host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res));
+	host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
+			resource_size(res));
 	if (!host->cmd_va) {
-		ret = -EIO;
-		goto err_probe1;
+		dev_err(&pdev->dev, "ale ioremap failed\n");
+		return -ENOMEM;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
-	if (!res) {
-		ret = -EIO;
-		goto err_probe1;
+	if (!res)
+		return -EINVAL;
+
+	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+			pdev->name)) {
+		dev_err(&pdev->dev, "Failed to get memory regs resourse\n");
+		return -ENOENT;
 	}
 
-	host->resregs = request_mem_region(res->start, resource_size(res),
-			pdev->name);
-	if (!host->resregs) {
-		ret = -EIO;
-		goto err_probe1;
-	}
-
-	host->regs_va = ioremap(res->start, resource_size(res));
+	host->regs_va = devm_ioremap(&pdev->dev, res->start,
+			resource_size(res));
 	if (!host->regs_va) {
-		ret = -EIO;
-		goto err_probe1;
+		dev_err(&pdev->dev, "regs ioremap failed\n");
+		return -ENOMEM;
 	}
 
 	host->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(host->clk)) {
 		dev_err(&pdev->dev, "failed to fetch block clock\n");
-		ret = PTR_ERR(host->clk);
-		host->clk = NULL;
-		goto err_probe1;
+		return PTR_ERR(host->clk);
 	}
 
 	ret = clk_enable(host->clk);
 	if (ret)
-		goto err_probe1;
+		goto err_clk_enable;
 
 	/*
 	 * This device ID is actually a common AMBA ID as used on the
@@ -639,7 +1012,14 @@
 
 	host->bank = pdata->bank;
 	host->select_chip = pdata->select_bank;
-	regs = host->regs_va;
+	host->partitions = pdata->partitions;
+	host->nr_partitions = pdata->nr_partitions;
+	host->dev = &pdev->dev;
+	host->dev_timings = pdata->nand_timings;
+	host->mode = pdata->mode;
+
+	if (host->mode == USE_DMA_ACCESS)
+		init_completion(&host->dma_access_complete);
 
 	/* Link all private pointers */
 	mtd = &host->mtd;
@@ -658,21 +1038,53 @@
 	nand->ecc.size = 512;
 	nand->options = pdata->options;
 	nand->select_chip = fsmc_select_chip;
+	nand->badblockbits = 7;
 
 	if (pdata->width == FSMC_NAND_BW16)
 		nand->options |= NAND_BUSWIDTH_16;
 
-	fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
+	switch (host->mode) {
+	case USE_DMA_ACCESS:
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_MEMCPY, mask);
+		host->read_dma_chan = dma_request_channel(mask, filter,
+				pdata->read_dma_priv);
+		if (!host->read_dma_chan) {
+			dev_err(&pdev->dev, "Unable to get read dma channel\n");
+			goto err_req_read_chnl;
+		}
+		host->write_dma_chan = dma_request_channel(mask, filter,
+				pdata->write_dma_priv);
+		if (!host->write_dma_chan) {
+			dev_err(&pdev->dev, "Unable to get write dma channel\n");
+			goto err_req_write_chnl;
+		}
+		nand->read_buf = fsmc_read_buf_dma;
+		nand->write_buf = fsmc_write_buf_dma;
+		break;
+
+	default:
+	case USE_WORD_ACCESS:
+		nand->read_buf = fsmc_read_buf;
+		nand->write_buf = fsmc_write_buf;
+		break;
+	}
+
+	fsmc_nand_setup(host->regs_va, host->bank,
+			nand->options & NAND_BUSWIDTH_16,
+			host->dev_timings);
 
 	if (AMBA_REV_BITS(host->pid) >= 8) {
 		nand->ecc.read_page = fsmc_read_page_hwecc;
 		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
-		nand->ecc.correct = fsmc_correct_data;
+		nand->ecc.correct = fsmc_bch8_correct_data;
 		nand->ecc.bytes = 13;
+		nand->ecc.strength = 8;
 	} else {
 		nand->ecc.calculate = fsmc_read_hwecc_ecc1;
 		nand->ecc.correct = nand_correct_data;
 		nand->ecc.bytes = 3;
+		nand->ecc.strength = 1;
 	}
 
 	/*
@@ -681,19 +1093,52 @@
 	if (nand_scan_ident(&host->mtd, 1, NULL)) {
 		ret = -ENXIO;
 		dev_err(&pdev->dev, "No NAND Device found!\n");
-		goto err_probe;
+		goto err_scan_ident;
 	}
 
 	if (AMBA_REV_BITS(host->pid) >= 8) {
-		if (host->mtd.writesize == 512) {
-			nand->ecc.layout = &fsmc_ecc4_sp_layout;
+		switch (host->mtd.oobsize) {
+		case 16:
+			nand->ecc.layout = &fsmc_ecc4_16_layout;
 			host->ecc_place = &fsmc_ecc4_sp_place;
-		} else {
-			nand->ecc.layout = &fsmc_ecc4_lp_layout;
+			break;
+		case 64:
+			nand->ecc.layout = &fsmc_ecc4_64_layout;
 			host->ecc_place = &fsmc_ecc4_lp_place;
+			break;
+		case 128:
+			nand->ecc.layout = &fsmc_ecc4_128_layout;
+			host->ecc_place = &fsmc_ecc4_lp_place;
+			break;
+		case 224:
+			nand->ecc.layout = &fsmc_ecc4_224_layout;
+			host->ecc_place = &fsmc_ecc4_lp_place;
+			break;
+		case 256:
+			nand->ecc.layout = &fsmc_ecc4_256_layout;
+			host->ecc_place = &fsmc_ecc4_lp_place;
+			break;
+		default:
+			printk(KERN_WARNING "No oob scheme defined for "
+			       "oobsize %d\n", mtd->oobsize);
+			BUG();
 		}
 	} else {
-		nand->ecc.layout = &fsmc_ecc1_layout;
+		switch (host->mtd.oobsize) {
+		case 16:
+			nand->ecc.layout = &fsmc_ecc1_16_layout;
+			break;
+		case 64:
+			nand->ecc.layout = &fsmc_ecc1_64_layout;
+			break;
+		case 128:
+			nand->ecc.layout = &fsmc_ecc1_128_layout;
+			break;
+		default:
+			printk(KERN_WARNING "No oob scheme defined for "
+			       "oobsize %d\n", mtd->oobsize);
+			BUG();
+		}
 	}
 
 	/* Second stage of scan to fill MTD data-structures */
@@ -713,13 +1158,9 @@
 	 * Check for partition info passed
 	 */
 	host->mtd.name = "nand";
-	ret = mtd_device_parse_register(&host->mtd, NULL, 0,
-			host->mtd.size <= 0x04000000 ?
-				partition_info_16KB_blk :
-				partition_info_128KB_blk,
-			host->mtd.size <= 0x04000000 ?
-				ARRAY_SIZE(partition_info_16KB_blk) :
-				ARRAY_SIZE(partition_info_128KB_blk));
+	ppdata.of_node = np;
+	ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata,
+					host->partitions, host->nr_partitions);
 	if (ret)
 		goto err_probe;
 
@@ -728,32 +1169,16 @@
 	return 0;
 
 err_probe:
+err_scan_ident:
+	if (host->mode == USE_DMA_ACCESS)
+		dma_release_channel(host->write_dma_chan);
+err_req_write_chnl:
+	if (host->mode == USE_DMA_ACCESS)
+		dma_release_channel(host->read_dma_chan);
+err_req_read_chnl:
 	clk_disable(host->clk);
-err_probe1:
-	if (host->clk)
-		clk_put(host->clk);
-	if (host->regs_va)
-		iounmap(host->regs_va);
-	if (host->resregs)
-		release_mem_region(host->resregs->start,
-				resource_size(host->resregs));
-	if (host->cmd_va)
-		iounmap(host->cmd_va);
-	if (host->rescmd)
-		release_mem_region(host->rescmd->start,
-				resource_size(host->rescmd));
-	if (host->addr_va)
-		iounmap(host->addr_va);
-	if (host->resaddr)
-		release_mem_region(host->resaddr->start,
-				resource_size(host->resaddr));
-	if (host->data_va)
-		iounmap(host->data_va);
-	if (host->resdata)
-		release_mem_region(host->resdata->start,
-				resource_size(host->resdata));
-
-	kfree(host);
+err_clk_enable:
+	clk_put(host->clk);
 	return ret;
 }
 
@@ -768,24 +1193,15 @@
 
 	if (host) {
 		nand_release(&host->mtd);
+
+		if (host->mode == USE_DMA_ACCESS) {
+			dma_release_channel(host->write_dma_chan);
+			dma_release_channel(host->read_dma_chan);
+		}
 		clk_disable(host->clk);
 		clk_put(host->clk);
-
-		iounmap(host->regs_va);
-		release_mem_region(host->resregs->start,
-				resource_size(host->resregs));
-		iounmap(host->cmd_va);
-		release_mem_region(host->rescmd->start,
-				resource_size(host->rescmd));
-		iounmap(host->addr_va);
-		release_mem_region(host->resaddr->start,
-				resource_size(host->resaddr));
-		iounmap(host->data_va);
-		release_mem_region(host->resdata->start,
-				resource_size(host->resdata));
-
-		kfree(host);
 	}
+
 	return 0;
 }
 
@@ -801,15 +1217,24 @@
 static int fsmc_nand_resume(struct device *dev)
 {
 	struct fsmc_nand_data *host = dev_get_drvdata(dev);
-	if (host)
+	if (host) {
 		clk_enable(host->clk);
+		fsmc_nand_setup(host->regs_va, host->bank,
+				host->nand.options & NAND_BUSWIDTH_16,
+				host->dev_timings);
+	}
 	return 0;
 }
 
-static const struct dev_pm_ops fsmc_nand_pm_ops = {
-	.suspend = fsmc_nand_suspend,
-	.resume = fsmc_nand_resume,
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id fsmc_nand_id_table[] = {
+	{ .compatible = "st,spear600-fsmc-nand" },
+	{}
 };
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
 #endif
 
 static struct platform_driver fsmc_nand_driver = {
@@ -817,6 +1242,7 @@
 	.driver = {
 		.owner = THIS_MODULE,
 		.name = "fsmc-nand",
+		.of_match_table = of_match_ptr(fsmc_nand_id_table),
 #ifdef CONFIG_PM
 		.pm = &fsmc_nand_pm_ops,
 #endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 590dd5c..e8ea710 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -848,7 +848,10 @@
 
 	sg_init_one(sgl, this->cmd_buffer, this->command_length);
 	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
-	desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_MEM_TO_DEV, 1);
+	desc = dmaengine_prep_slave_sg(channel,
+				sgl, 1, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -889,7 +892,8 @@
 	/* [2] send DMA request */
 	prepare_data_dma(this, DMA_TO_DEVICE);
 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
-						1, DMA_MEM_TO_DEV, 1);
+					1, DMA_MEM_TO_DEV,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -925,7 +929,8 @@
 	/* [2] : send DMA request */
 	prepare_data_dma(this, DMA_FROM_DEVICE);
 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
-					1, DMA_DEV_TO_MEM, 1);
+					1, DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -970,8 +975,10 @@
 	pio[4] = payload;
 	pio[5] = auxiliary;
 
-	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+	desc = dmaengine_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_TRANS_NONE,
+					DMA_CTRL_ACK);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -1035,7 +1042,8 @@
 	pio[5] = auxiliary;
 	desc = dmaengine_prep_slave_sg(channel,
 					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
+					ARRAY_SIZE(pio), DMA_TRANS_NONE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -1052,9 +1060,11 @@
 		| BF_GPMI_CTRL0_ADDRESS(address)
 		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
 	pio[1] = 0;
+	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
 	desc = dmaengine_prep_slave_sg(channel,
-				(struct scatterlist *)pio, 2,
-				DMA_TRANS_NONE, 1);
+				(struct scatterlist *)pio, 3,
+				DMA_TRANS_NONE,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		pr_err("step 3 error\n");
 		return -1;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 493ec2f..75b1dde 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1124,7 +1124,7 @@
 		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
 
 	/* Do we have a flash based bad block table ? */
-	if (chip->options & NAND_BBT_USE_FLASH)
+	if (chip->bbt_options & NAND_BBT_USE_FLASH)
 		ret = nand_update_bbt(mtd, ofs);
 	else {
 		chipnr = (int)(ofs >> chip->chip_shift);
@@ -1155,7 +1155,7 @@
 	return ret;
 }
 
-static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
 {
 	struct boot_rom_geometry *geometry = &this->rom_geometry;
 
@@ -1182,7 +1182,7 @@
 }
 
 static const char  *fingerprint = "STMP";
-static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
 {
 	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
 	struct device *dev = this->dev;
@@ -1239,7 +1239,7 @@
 }
 
 /* Writes a transcription stamp. */
-static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
 {
 	struct device *dev = this->dev;
 	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
@@ -1322,7 +1322,7 @@
 	return 0;
 }
 
-static int __devinit mx23_boot_init(struct gpmi_nand_data  *this)
+static int mx23_boot_init(struct gpmi_nand_data  *this)
 {
 	struct device *dev = this->dev;
 	struct nand_chip *chip = &this->nand;
@@ -1391,7 +1391,7 @@
 	return 0;
 }
 
-static int __devinit nand_boot_init(struct gpmi_nand_data  *this)
+static int nand_boot_init(struct gpmi_nand_data  *this)
 {
 	nand_boot_set_geometry(this);
 
@@ -1401,7 +1401,7 @@
 	return 0;
 }
 
-static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this)
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
 {
 	int ret;
 
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index e023bcc..ec6180d 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -20,7 +20,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
-#include <mach/dma.h>
+#include <linux/fsl/mxs-dma.h>
 
 struct resources {
 	void          *gpmi_regs;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 5dc6f0d..11e4878 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -135,8 +135,8 @@
 	}
 
 	/* Register the partitions */
-	mtd_device_parse_register(h1910_nand_mtd, NULL, 0,
-			partition_info, NUM_PARTITIONS);
+	mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
+				  NUM_PARTITIONS);
 
 	/* Return happy */
 	return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ac3b9f2..e4147e8 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,6 +332,11 @@
 	chip->ecc.mode		= NAND_ECC_HW_OOB_FIRST;
 	chip->ecc.size		= 512;
 	chip->ecc.bytes		= 9;
+	chip->ecc.strength	= 2;
+	/*
+	 * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
+	 * conservative guess, given 9 ECC bytes and the Reed-Solomon algorithm.
+	 */
 
 	if (pdata)
 		chip->ecc.layout = pdata->ecc_layout;
@@ -367,9 +372,9 @@
 		goto err_gpio_free;
 	}
 
-	ret = mtd_device_parse_register(mtd, NULL, 0,
-			pdata ? pdata->partitions : NULL,
-			pdata ? pdata->num_partitions : 0);
+	ret = mtd_device_parse_register(mtd, NULL, NULL,
+					pdata ? pdata->partitions : NULL,
+					pdata ? pdata->num_partitions : 0);
 
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 74a43b8..cc0678a 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1225,9 +1225,16 @@
 		goto escan;
 	}
 
+	if (this->ecc.mode == NAND_ECC_HW) {
+		if (nfc_is_v1())
+			this->ecc.strength = 1;
+		else
+			this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+	}
+
 	/* Register the partitions */
-	mtd_device_parse_register(mtd, part_probes, 0,
-			pdata->parts, pdata->nr_parts);
+	mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
+				  pdata->nr_parts);
 
 	platform_set_drvdata(pdev, host);
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8a393f9..47b19c0 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -123,12 +123,6 @@
 		ret = -EINVAL;
 	}
 
-	/* Do not allow past end of device */
-	if (ofs + len > mtd->size) {
-		pr_debug("%s: past end of device\n", __func__);
-		ret = -EINVAL;
-	}
-
 	return ret;
 }
 
@@ -338,7 +332,7 @@
  */
 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
 {
-	int page, chipnr, res = 0;
+	int page, chipnr, res = 0, i = 0;
 	struct nand_chip *chip = mtd->priv;
 	u16 bad;
 
@@ -356,23 +350,29 @@
 		chip->select_chip(mtd, chipnr);
 	}
 
-	if (chip->options & NAND_BUSWIDTH_16) {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
-			      page);
-		bad = cpu_to_le16(chip->read_word(mtd));
-		if (chip->badblockpos & 0x1)
-			bad >>= 8;
-		else
-			bad &= 0xFF;
-	} else {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
-		bad = chip->read_byte(mtd);
-	}
+	do {
+		if (chip->options & NAND_BUSWIDTH_16) {
+			chip->cmdfunc(mtd, NAND_CMD_READOOB,
+					chip->badblockpos & 0xFE, page);
+			bad = cpu_to_le16(chip->read_word(mtd));
+			if (chip->badblockpos & 0x1)
+				bad >>= 8;
+			else
+				bad &= 0xFF;
+		} else {
+			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+					page);
+			bad = chip->read_byte(mtd);
+		}
 
-	if (likely(chip->badblockbits == 8))
-		res = bad != 0xFF;
-	else
-		res = hweight8(bad) < chip->badblockbits;
+		if (likely(chip->badblockbits == 8))
+			res = bad != 0xFF;
+		else
+			res = hweight8(bad) < chip->badblockbits;
+		ofs += mtd->writesize;
+		page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+		i++;
+	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
 
 	if (getchip)
 		nand_release_device(mtd);
@@ -386,51 +386,79 @@
  * @ofs: offset from device start
  *
  * This is the default implementation, which can be overridden by a hardware
- * specific driver.
+ * specific driver. We try operations in the following order, according to our
+ * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
+ *  (1) erase the affected block, to allow OOB marker to be written cleanly
+ *  (2) update in-memory BBT
+ *  (3) write bad block marker to OOB area of affected block
+ *  (4) update flash-based BBT
+ * Note that we retain the first error encountered in (3) or (4), finish the
+ * procedures, and return that first error at the end.
 */
 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct nand_chip *chip = mtd->priv;
 	uint8_t buf[2] = { 0, 0 };
-	int block, ret, i = 0;
+	int block, res, ret = 0, i = 0;
+	int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
 
-	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
-		ofs += mtd->erasesize - mtd->writesize;
+	if (write_oob) {
+		struct erase_info einfo;
+
+		/* Attempt erase before marking OOB */
+		memset(&einfo, 0, sizeof(einfo));
+		einfo.mtd = mtd;
+		einfo.addr = ofs;
+		einfo.len = 1 << chip->phys_erase_shift;
+		nand_erase_nand(mtd, &einfo, 0);
+	}
 
 	/* Get block number */
 	block = (int)(ofs >> chip->bbt_erase_shift);
+	/* Mark block bad in memory-based BBT */
 	if (chip->bbt)
 		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
 
-	/* Do we have a flash based bad block table? */
-	if (chip->bbt_options & NAND_BBT_USE_FLASH)
-		ret = nand_update_bbt(mtd, ofs);
-	else {
+	/* Write bad block marker to OOB */
+	if (write_oob) {
 		struct mtd_oob_ops ops;
+		loff_t wr_ofs = ofs;
 
 		nand_get_device(chip, mtd, FL_WRITING);
 
-		/*
-		 * Write to first two pages if necessary. If we write to more
-		 * than one location, the first error encountered quits the
-		 * procedure. We write two bytes per location, so we dont have
-		 * to mess with 16 bit access.
-		 */
-		ops.len = ops.ooblen = 2;
 		ops.datbuf = NULL;
 		ops.oobbuf = buf;
-		ops.ooboffs = chip->badblockpos & ~0x01;
+		ops.ooboffs = chip->badblockpos;
+		if (chip->options & NAND_BUSWIDTH_16) {
+			ops.ooboffs &= ~0x01;
+			ops.len = ops.ooblen = 2;
+		} else {
+			ops.len = ops.ooblen = 1;
+		}
 		ops.mode = MTD_OPS_PLACE_OOB;
+
+		/* Write to first/last page(s) if necessary */
+		if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+			wr_ofs += mtd->erasesize - mtd->writesize;
 		do {
-			ret = nand_do_write_oob(mtd, ofs, &ops);
+			res = nand_do_write_oob(mtd, wr_ofs, &ops);
+			if (!ret)
+				ret = res;
 
 			i++;
-			ofs += mtd->writesize;
-		} while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) &&
-				i < 2);
+			wr_ofs += mtd->writesize;
+		} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
 
 		nand_release_device(mtd);
 	}
+
+	/* Update flash-based bad block table */
+	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+		res = nand_update_bbt(mtd, ofs);
+		if (!ret)
+			ret = res;
+	}
+
 	if (!ret)
 		mtd->ecc_stats.badblocks++;
 
@@ -1586,25 +1614,14 @@
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Do not allow reads past end of device */
-	if ((from + len) > mtd->size)
-		return -EINVAL;
-	if (!len)
-		return 0;
-
 	nand_get_device(chip, mtd, FL_READING);
-
 	ops.len = len;
 	ops.datbuf = buf;
 	ops.oobbuf = NULL;
 	ops.mode = 0;
-
 	ret = nand_do_read_ops(mtd, from, &ops);
-
 	*retlen = ops.retlen;
-
 	nand_release_device(mtd);
-
 	return ret;
 }
 
@@ -2293,12 +2310,6 @@
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Do not allow reads past end of device */
-	if ((to + len) > mtd->size)
-		return -EINVAL;
-	if (!len)
-		return 0;
-
 	/* Wait for the device to get ready */
 	panic_nand_wait(mtd, chip, 400);
 
@@ -2333,25 +2344,14 @@
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Do not allow reads past end of device */
-	if ((to + len) > mtd->size)
-		return -EINVAL;
-	if (!len)
-		return 0;
-
 	nand_get_device(chip, mtd, FL_WRITING);
-
 	ops.len = len;
 	ops.datbuf = (uint8_t *)buf;
 	ops.oobbuf = NULL;
 	ops.mode = 0;
-
 	ret = nand_do_write_ops(mtd, to, &ops);
-
 	*retlen = ops.retlen;
-
 	nand_release_device(mtd);
-
 	return ret;
 }
 
@@ -2550,8 +2550,6 @@
 	if (check_offs_len(mtd, instr->addr, instr->len))
 		return -EINVAL;
 
-	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
 	/* Grab the lock and see if the device is available */
 	nand_get_device(chip, mtd, FL_ERASING);
 
@@ -2715,10 +2713,6 @@
  */
 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
 {
-	/* Check for invalid offset */
-	if (offs > mtd->size)
-		return -EINVAL;
-
 	return nand_block_checkbad(mtd, offs, 1, 0);
 }
 
@@ -2857,7 +2851,6 @@
 		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
 		return 0;
 
-	pr_info("ONFI flash detected\n");
 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
 	for (i = 0; i < 3; i++) {
 		chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
@@ -2898,7 +2891,8 @@
 	mtd->writesize = le32_to_cpu(p->byte_per_page);
 	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-	chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+	chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
 	*busw = 0;
 	if (le16_to_cpu(p->features) & 1)
 		*busw = NAND_BUSWIDTH_16;
@@ -2907,6 +2901,7 @@
 	chip->options |= (NAND_NO_READRDY |
 			NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
 
+	pr_info("ONFI flash detected\n");
 	return 1;
 }
 
@@ -3238,6 +3233,10 @@
 	int i;
 	struct nand_chip *chip = mtd->priv;
 
+	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
+	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+			!(chip->bbt_options & NAND_BBT_USE_FLASH));
+
 	if (!(chip->options & NAND_OWN_BUFFERS))
 		chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
 	if (!chip->buffers)
@@ -3350,6 +3349,7 @@
 		if (!chip->ecc.size)
 			chip->ecc.size = 256;
 		chip->ecc.bytes = 3;
+		chip->ecc.strength = 1;
 		break;
 
 	case NAND_ECC_SOFT_BCH:
@@ -3384,6 +3384,8 @@
 			pr_warn("BCH ECC initialization failed!\n");
 			BUG();
 		}
+		chip->ecc.strength =
+			chip->ecc.bytes*8 / fls(8*chip->ecc.size);
 		break;
 
 	case NAND_ECC_NONE:
@@ -3397,6 +3399,7 @@
 		chip->ecc.write_oob = nand_write_oob_std;
 		chip->ecc.size = mtd->writesize;
 		chip->ecc.bytes = 0;
+		chip->ecc.strength = 0;
 		break;
 
 	default:
@@ -3461,25 +3464,26 @@
 	mtd->type = MTD_NANDFLASH;
 	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
 						MTD_CAP_NANDFLASH;
-	mtd->erase = nand_erase;
-	mtd->point = NULL;
-	mtd->unpoint = NULL;
-	mtd->read = nand_read;
-	mtd->write = nand_write;
-	mtd->panic_write = panic_nand_write;
-	mtd->read_oob = nand_read_oob;
-	mtd->write_oob = nand_write_oob;
-	mtd->sync = nand_sync;
-	mtd->lock = NULL;
-	mtd->unlock = NULL;
-	mtd->suspend = nand_suspend;
-	mtd->resume = nand_resume;
-	mtd->block_isbad = nand_block_isbad;
-	mtd->block_markbad = nand_block_markbad;
+	mtd->_erase = nand_erase;
+	mtd->_point = NULL;
+	mtd->_unpoint = NULL;
+	mtd->_read = nand_read;
+	mtd->_write = nand_write;
+	mtd->_panic_write = panic_nand_write;
+	mtd->_read_oob = nand_read_oob;
+	mtd->_write_oob = nand_write_oob;
+	mtd->_sync = nand_sync;
+	mtd->_lock = NULL;
+	mtd->_unlock = NULL;
+	mtd->_suspend = nand_suspend;
+	mtd->_resume = nand_resume;
+	mtd->_block_isbad = nand_block_isbad;
+	mtd->_block_markbad = nand_block_markbad;
 	mtd->writebufsize = mtd->writesize;
 
-	/* propagate ecc.layout to mtd_info */
+	/* propagate ecc info to mtd_info */
 	mtd->ecclayout = chip->ecc.layout;
+	mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
 
 	/* Check, if we should skip the bad block table scan */
 	if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ec68854..2b6f632 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -179,6 +179,7 @@
 	chip->ecc.mode = NAND_ECC_HW;
 	chip->ecc.size = 256;
 	chip->ecc.bytes = 3;
+	chip->ecc.strength = 1;
 	chip->priv = ndfc;
 
 	ndfc->mtd.priv = chip;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index b3a883e..c2b0bba 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1058,6 +1058,7 @@
 		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
 		info->nand.ecc.bytes            = 3;
 		info->nand.ecc.size             = 512;
+		info->nand.ecc.strength         = 1;
 		info->nand.ecc.calculate        = omap_calculate_ecc;
 		info->nand.ecc.hwctl            = omap_enable_hwecc;
 		info->nand.ecc.correct          = omap_correct_data;
@@ -1101,8 +1102,8 @@
 		goto out_release_mem_region;
 	}
 
-	mtd_device_parse_register(&info->mtd, NULL, 0,
-			pdata->parts, pdata->nr_parts);
+	mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+				  pdata->nr_parts);
 
 	platform_set_drvdata(pdev, &info->mtd);
 
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 29f505ad..1d3bfb2 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -129,8 +129,8 @@
 	}
 
 	mtd->name = "orion_nand";
-	ret = mtd_device_parse_register(mtd, NULL, 0,
-			board->parts, board->nr_parts);
+	ret = mtd_device_parse_register(mtd, NULL, NULL, board->parts,
+					board->nr_parts);
 	if (ret) {
 		nand_release(mtd);
 		goto no_dev;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 7f2da69..6404e6e 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -99,8 +99,9 @@
 	}
 
 	err = mtd_device_parse_register(&data->mtd,
-			pdata->chip.part_probe_types, 0,
-			pdata->chip.partitions, pdata->chip.nr_partitions);
+					pdata->chip.part_probe_types, NULL,
+					pdata->chip.partitions,
+					pdata->chip.nr_partitions);
 
 	if (!err)
 		return err;
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 7e52af5..0ddd90e 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -275,11 +275,10 @@
 	ppchameleon_mtd->name = "ppchameleon-nand";
 
 	/* Register the partitions */
-	mtd_device_parse_register(ppchameleon_mtd, NULL, 0,
-			ppchameleon_mtd->size == NAND_SMALL_SIZE ?
-				partition_info_me :
-				partition_info_hi,
-			NUM_PARTITIONS);
+	mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
+				  ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+					partition_info_me : partition_info_hi,
+				  NUM_PARTITIONS);
 
  nand_evb_init:
 	/****************************
@@ -365,11 +364,10 @@
 	ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
 
 	/* Register the partitions */
-	mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0,
-			ppchameleon_mtd->size == NAND_SMALL_SIZE ?
-				partition_info_me :
-				partition_info_hi,
-			NUM_PARTITIONS);
+	mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
+				  ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+				  partition_info_me : partition_info_hi,
+				  NUM_PARTITIONS);
 
 	/* Return happy */
 	return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5c3d719..def50ca 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1002,6 +1002,7 @@
 KEEP_CONFIG:
 	chip->ecc.mode = NAND_ECC_HW;
 	chip->ecc.size = host->page_size;
+	chip->ecc.strength = 1;
 
 	chip->options = NAND_NO_AUTOINCR;
 	chip->options |= NAND_NO_READRDY;
@@ -1228,8 +1229,9 @@
 			continue;
 		}
 
-		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
-				pdata->parts[cs], pdata->nr_parts[cs]);
+		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
+						NULL, pdata->parts[cs],
+						pdata->nr_parts[cs]);
 		if (!ret)
 			probe_success = 1;
 	}
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 769a4e0..c204018 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -891,6 +891,7 @@
 	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
 	chip->ecc.size = R852_DMA_LEN;
 	chip->ecc.bytes = SM_OOB_SIZE;
+	chip->ecc.strength = 2;
 	chip->ecc.hwctl = r852_ecc_hwctl;
 	chip->ecc.calculate = r852_ecc_calculate;
 	chip->ecc.correct = r852_ecc_correct;
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index f309add..e55b5cf 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -527,6 +527,7 @@
 	this->ecc.mode = NAND_ECC_HW_SYNDROME;
 	this->ecc.size = 512;
 	this->ecc.bytes = 8;
+	this->ecc.strength = 3;
 	/* return the status of extra status and ECC checks */
 	this->errstat = rtc_from4_errstat;
 	/* set the nand_oobinfo to support FPGA H/W error detection */
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 868685d..91121f3 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -751,8 +751,8 @@
 	if (set)
 		mtd->mtd.name = set->name;
 
-	return mtd_device_parse_register(&mtd->mtd, NULL, 0,
-			set->partitions, set->nr_partitions);
+	return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+					 set->partitions, set->nr_partitions);
 }
 
 /**
@@ -823,6 +823,7 @@
 		chip->ecc.calculate = s3c2410_nand_calculate_ecc;
 		chip->ecc.correct   = s3c2410_nand_correct_data;
 		chip->ecc.mode	    = NAND_ECC_HW;
+		chip->ecc.strength  = 1;
 
 		switch (info->cpu_type) {
 		case TYPE_S3C2410:
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 93b1f74..e9b2b26 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #include <linux/mtd/mtd.h>
@@ -283,7 +284,7 @@
 static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT;
+	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
 	uint32_t flcmdcr_val, addr_len_bytes = 0;
 
 	/* Set SNAND bit if page size is 2048byte */
@@ -303,6 +304,7 @@
 		break;
 	case NAND_CMD_READ0:
 	case NAND_CMD_READOOB:
+	case NAND_CMD_RNDOUT:
 		addr_len_bytes = flctl->rw_ADRCNT;
 		flcmdcr_val |= CDSRC_E;
 		if (flctl->chip.options & NAND_BUSWIDTH_16)
@@ -320,6 +322,7 @@
 		break;
 	case NAND_CMD_READID:
 		flcmncr_val &= ~SNAND_E;
+		flcmdcr_val |= CDSRC_E;
 		addr_len_bytes = ADRCNT_1;
 		break;
 	case NAND_CMD_STATUS:
@@ -513,6 +516,8 @@
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
 	uint32_t read_cmd = 0;
 
+	pm_runtime_get_sync(&flctl->pdev->dev);
+
 	flctl->read_bytes = 0;
 	if (command != NAND_CMD_PAGEPROG)
 		flctl->index = 0;
@@ -525,7 +530,6 @@
 			execmd_read_page_sector(mtd, page_addr);
 			break;
 		}
-		empty_fifo(flctl);
 		if (flctl->page_size)
 			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
 				| command);
@@ -547,7 +551,6 @@
 			break;
 		}
 
-		empty_fifo(flctl);
 		if (flctl->page_size) {
 			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
 				| NAND_CMD_READ0);
@@ -559,15 +562,35 @@
 		flctl->read_bytes = mtd->oobsize;
 		goto read_normal_exit;
 
-	case NAND_CMD_READID:
-		empty_fifo(flctl);
-		set_cmd_regs(mtd, command, command);
-		set_addr(mtd, 0, 0);
+	case NAND_CMD_RNDOUT:
+		if (flctl->hwecc)
+			break;
 
-		flctl->read_bytes = 4;
+		if (flctl->page_size)
+			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+				| command);
+		else
+			set_cmd_regs(mtd, command, command);
+
+		set_addr(mtd, column, 0);
+
+		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+		goto read_normal_exit;
+
+	case NAND_CMD_READID:
+		set_cmd_regs(mtd, command, command);
+
+		/* READID is always performed using an 8-bit bus */
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column <<= 1;
+		set_addr(mtd, column, 0);
+
+		flctl->read_bytes = 8;
 		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+		empty_fifo(flctl);
 		start_translation(flctl);
-		read_datareg(flctl, 0);	/* read and end */
+		read_fiforeg(flctl, flctl->read_bytes, 0);
+		wait_completion(flctl);
 		break;
 
 	case NAND_CMD_ERASE1:
@@ -650,29 +673,55 @@
 	default:
 		break;
 	}
-	return;
+	goto runtime_exit;
 
 read_normal_exit:
 	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
+	empty_fifo(flctl);
 	start_translation(flctl);
 	read_fiforeg(flctl, flctl->read_bytes, 0);
 	wait_completion(flctl);
+runtime_exit:
+	pm_runtime_put_sync(&flctl->pdev->dev);
 	return;
 }
 
 static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t flcmncr_val = readl(FLCMNCR(flctl));
+	int ret;
 
 	switch (chipnr) {
 	case -1:
-		flcmncr_val &= ~CE0_ENABLE;
-		writel(flcmncr_val, FLCMNCR(flctl));
+		flctl->flcmncr_base &= ~CE0_ENABLE;
+
+		pm_runtime_get_sync(&flctl->pdev->dev);
+		writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+		if (flctl->qos_request) {
+			dev_pm_qos_remove_request(&flctl->pm_qos);
+			flctl->qos_request = 0;
+		}
+
+		pm_runtime_put_sync(&flctl->pdev->dev);
 		break;
 	case 0:
-		flcmncr_val |= CE0_ENABLE;
-		writel(flcmncr_val, FLCMNCR(flctl));
+		flctl->flcmncr_base |= CE0_ENABLE;
+
+		if (!flctl->qos_request) {
+			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+							&flctl->pm_qos, 100);
+			if (ret < 0)
+				dev_err(&flctl->pdev->dev,
+					"PM QoS request failed: %d\n", ret);
+			flctl->qos_request = 1;
+		}
+
+		if (flctl->holden) {
+			pm_runtime_get_sync(&flctl->pdev->dev);
+			writel(HOLDEN, FLHOLDCR(flctl));
+			pm_runtime_put_sync(&flctl->pdev->dev);
+		}
 		break;
 	default:
 		BUG();
@@ -730,11 +779,6 @@
 	return 0;
 }
 
-static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
-{
-	writel(val, FLCMNCR(flctl));
-}
-
 static int flctl_chip_init_tail(struct mtd_info *mtd)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -781,13 +825,13 @@
 
 		chip->ecc.size = 512;
 		chip->ecc.bytes = 10;
+		chip->ecc.strength = 4;
 		chip->ecc.read_page = flctl_read_page_hwecc;
 		chip->ecc.write_page = flctl_write_page_hwecc;
 		chip->ecc.mode = NAND_ECC_HW;
 
 		/* 4 symbols ECC enabled */
-		writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02,
-				FLCMNCR(flctl));
+		flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02;
 	} else {
 		chip->ecc.mode = NAND_ECC_SOFT;
 	}
@@ -819,13 +863,13 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(&pdev->dev, "failed to get I/O memory\n");
-		goto err;
+		goto err_iomap;
 	}
 
 	flctl->reg = ioremap(res->start, resource_size(res));
 	if (flctl->reg == NULL) {
 		dev_err(&pdev->dev, "failed to remap I/O memory\n");
-		goto err;
+		goto err_iomap;
 	}
 
 	platform_set_drvdata(pdev, flctl);
@@ -833,9 +877,9 @@
 	nand = &flctl->chip;
 	flctl_mtd->priv = nand;
 	flctl->pdev = pdev;
+	flctl->flcmncr_base = pdata->flcmncr_val;
 	flctl->hwecc = pdata->has_hwecc;
-
-	flctl_register_init(flctl, pdata->flcmncr_val);
+	flctl->holden = pdata->use_holden;
 
 	nand->options = NAND_NO_AUTOINCR;
 
@@ -855,23 +899,28 @@
 		nand->read_word = flctl_read_word;
 	}
 
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_resume(&pdev->dev);
+
 	ret = nand_scan_ident(flctl_mtd, 1, NULL);
 	if (ret)
-		goto err;
+		goto err_chip;
 
 	ret = flctl_chip_init_tail(flctl_mtd);
 	if (ret)
-		goto err;
+		goto err_chip;
 
 	ret = nand_scan_tail(flctl_mtd);
 	if (ret)
-		goto err;
+		goto err_chip;
 
 	mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
 
 	return 0;
 
-err:
+err_chip:
+	pm_runtime_disable(&pdev->dev);
+err_iomap:
 	kfree(flctl);
 	return ret;
 }
@@ -881,6 +930,7 @@
 	struct sh_flctl *flctl = platform_get_drvdata(pdev);
 
 	nand_release(&flctl->mtd);
+	pm_runtime_disable(&pdev->dev);
 	kfree(flctl);
 
 	return 0;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index b175c0f..3421e37 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -167,6 +167,7 @@
 	this->ecc.mode = NAND_ECC_HW;
 	this->ecc.size = 256;
 	this->ecc.bytes = 3;
+	this->ecc.strength = 1;
 	this->badblock_pattern = data->badblock_pattern;
 	this->ecc.layout = data->ecc_layout;
 	this->ecc.hwctl = sharpsl_nand_enable_hwecc;
@@ -181,8 +182,8 @@
 	/* Register the partitions */
 	sharpsl->mtd.name = "sharpsl-nand";
 
-	err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0,
-			data->partitions, data->nr_partitions);
+	err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL,
+					data->partitions, data->nr_partitions);
 	if (err)
 		goto err_add;
 
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 6caa0cd..5aa5180 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -430,6 +430,7 @@
 	nand_chip->ecc.mode = NAND_ECC_HW;
 	nand_chip->ecc.size = 512;
 	nand_chip->ecc.bytes = 6;
+	nand_chip->ecc.strength = 2;
 	nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
 	nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
 	nand_chip->ecc.correct = tmio_nand_correct_data;
@@ -456,9 +457,9 @@
 		goto err_scan;
 	}
 	/* Register the partitions */
-	retval = mtd_device_parse_register(mtd, NULL, 0,
-			data ? data->partition : NULL,
-			data ? data->num_partitions : 0);
+	retval = mtd_device_parse_register(mtd, NULL, NULL,
+					   data ? data->partition : NULL,
+					   data ? data->num_partitions : 0);
 	if (!retval)
 		return retval;
 
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index c7c4f1d..26398dcf 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -356,6 +356,7 @@
 		/* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
 		chip->ecc.size = 256;
 		chip->ecc.bytes = 3;
+		chip->ecc.strength = 1;
 		chip->chip_delay = 100;
 		chip->controller = &drvdata->hw_control;
 
@@ -386,7 +387,7 @@
 		}
 		mtd->name = txx9_priv->mtdname;
 
-		mtd_device_parse_register(mtd, NULL, 0, NULL, 0);
+		mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
 		drvdata->mtds[i] = mtd;
 	}
 
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index a75382a..c5f4ebf 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,13 +56,6 @@
 	if (memcmp(mtd->name, "DiskOnChip", 10))
 		return;
 
-	if (!mtd_can_have_bb(mtd)) {
-		printk(KERN_ERR
-"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
-"Please use the new diskonchip driver under the NAND subsystem.\n");
-		return;
-	}
-
 	pr_debug("NFTL: add_mtd for %s\n", mtd->name);
 
 	nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 0ccd5bf..1c4f97c 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -70,9 +70,9 @@
 		goto out_iounmap;
 	}
 
-	err = mtd_device_parse_register(&info->mtd, NULL, 0,
-			pdata ? pdata->parts : NULL,
-			pdata ? pdata->nr_parts : 0);
+	err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+					pdata ? pdata->parts : NULL,
+					pdata ? pdata->nr_parts : 0);
 
 	platform_set_drvdata(pdev, info);
 
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 7e9ea68..398a827 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -751,9 +751,9 @@
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
 		goto err_release_regulator;
 
-	r = mtd_device_parse_register(&c->mtd, NULL, 0,
-			pdata ? pdata->parts : NULL,
-			pdata ? pdata->nr_parts : 0);
+	r = mtd_device_parse_register(&c->mtd, NULL, NULL,
+				      pdata ? pdata->parts : NULL,
+				      pdata ? pdata->nr_parts : 0);
 	if (r)
 		goto err_release_onenand;
 
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a061bc1..b3ce12e 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1753,16 +1753,6 @@
 	pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
 			(int)len);
 
-	/* Initialize retlen, in case of early exit */
-	*retlen = 0;
-
-	/* Do not allow writes past end of device */
-	if (unlikely((to + len) > mtd->size)) {
-		printk(KERN_ERR "%s: Attempt write to past end of device\n",
-			__func__);
-		return -EINVAL;
-	}
-
 	/* Reject writes, which are not page aligned */
         if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
 		printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -1890,13 +1880,6 @@
 	ops->retlen = 0;
 	ops->oobretlen = 0;
 
-	/* Do not allow writes past end of device */
-	if (unlikely((to + len) > mtd->size)) {
-		printk(KERN_ERR "%s: Attempt write to past end of device\n",
-			__func__);
-		return -EINVAL;
-	}
-
 	/* Reject writes, which are not page aligned */
         if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
 		printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -2493,12 +2476,6 @@
 			(unsigned long long)instr->addr,
 			(unsigned long long)instr->len);
 
-	/* Do not allow erase past end of device */
-	if (unlikely((len + addr) > mtd->size)) {
-		printk(KERN_ERR "%s: Erase past end of device\n", __func__);
-		return -EINVAL;
-	}
-
 	if (FLEXONENAND(this)) {
 		/* Find the eraseregion of this address */
 		int i = flexonenand_region(mtd, addr);
@@ -2525,8 +2502,6 @@
 		return -EINVAL;
 	}
 
-	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_ERASING);
 
@@ -4103,33 +4078,34 @@
 	mtd->oobavail = this->ecclayout->oobavail;
 
 	mtd->ecclayout = this->ecclayout;
+	mtd->ecc_strength = 1;
 
 	/* Fill in remaining MTD driver data */
 	mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
 	mtd->flags = MTD_CAP_NANDFLASH;
-	mtd->erase = onenand_erase;
-	mtd->point = NULL;
-	mtd->unpoint = NULL;
-	mtd->read = onenand_read;
-	mtd->write = onenand_write;
-	mtd->read_oob = onenand_read_oob;
-	mtd->write_oob = onenand_write_oob;
-	mtd->panic_write = onenand_panic_write;
+	mtd->_erase = onenand_erase;
+	mtd->_point = NULL;
+	mtd->_unpoint = NULL;
+	mtd->_read = onenand_read;
+	mtd->_write = onenand_write;
+	mtd->_read_oob = onenand_read_oob;
+	mtd->_write_oob = onenand_write_oob;
+	mtd->_panic_write = onenand_panic_write;
 #ifdef CONFIG_MTD_ONENAND_OTP
-	mtd->get_fact_prot_info = onenand_get_fact_prot_info;
-	mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
-	mtd->get_user_prot_info = onenand_get_user_prot_info;
-	mtd->read_user_prot_reg = onenand_read_user_prot_reg;
-	mtd->write_user_prot_reg = onenand_write_user_prot_reg;
-	mtd->lock_user_prot_reg = onenand_lock_user_prot_reg;
+	mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
+	mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
+	mtd->_get_user_prot_info = onenand_get_user_prot_info;
+	mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
+	mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
+	mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
 #endif
-	mtd->sync = onenand_sync;
-	mtd->lock = onenand_lock;
-	mtd->unlock = onenand_unlock;
-	mtd->suspend = onenand_suspend;
-	mtd->resume = onenand_resume;
-	mtd->block_isbad = onenand_block_isbad;
-	mtd->block_markbad = onenand_block_markbad;
+	mtd->_sync = onenand_sync;
+	mtd->_lock = onenand_lock;
+	mtd->_unlock = onenand_unlock;
+	mtd->_suspend = onenand_suspend;
+	mtd->_resume = onenand_resume;
+	mtd->_block_isbad = onenand_block_isbad;
+	mtd->_block_markbad = onenand_block_markbad;
 	mtd->owner = THIS_MODULE;
 	mtd->writebufsize = mtd->writesize;
 
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index fa1ee43..8e4b3f2 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -923,7 +923,7 @@
 		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 		if (!r) {
 			dev_err(&pdev->dev, "no buffer memory resource defined\n");
-			return -ENOENT;
+			err = -ENOENT;
 			goto ahb_resource_failed;
 		}
 
@@ -964,7 +964,7 @@
 		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 		if (!r) {
 			dev_err(&pdev->dev, "no dma memory resource defined\n");
-			return -ENOENT;
+			err = -ENOENT;
 			goto dma_resource_failed;
 		}
 
@@ -1014,7 +1014,7 @@
 	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
 		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
 
-	err = mtd_device_parse_register(mtd, NULL, 0,
+	err = mtd_device_parse_register(mtd, NULL, NULL,
 					pdata ? pdata->parts : NULL,
 					pdata ? pdata->nr_parts : 0);
 
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 48970c1..580035c 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -78,8 +78,7 @@
 
 	if ( directory < 0 ) {
 		offset = master->size + directory * master->erasesize;
-		while (mtd_can_have_bb(master) &&
-		       mtd_block_isbad(master, offset)) {
+		while (mtd_block_isbad(master, offset)) {
 			if (!offset) {
 			nogood:
 				printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +88,7 @@
 		}
 	} else {
 		offset = directory * master->erasesize;
-		while (mtd_can_have_bb(master) &&
-		       mtd_block_isbad(master, offset)) {
+		while (mtd_block_isbad(master, offset)) {
 			offset += master->erasesize;
 			if (offset == master->size)
 				goto nogood;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 072ed59..9e2dfd5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1256,7 +1256,7 @@
 
 static struct mtd_blktrans_ops sm_ftl_ops = {
 	.name		= "smblk",
-	.major		= -1,
+	.major		= 0,
 	.part_bits	= SM_FTL_PARTN_BITS,
 	.blksize	= SM_SECTOR_SIZE,
 	.getgeo		= sm_getgeo,
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index e2cdebf..61af9bb 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -386,19 +386,11 @@
 	return count;
 }
 
-static int default_open(struct inode *inode, struct file *file)
-{
-	if (inode->i_private)
-		file->private_data = inode->i_private;
-
-	return 0;
-}
-
 /* File operations for all UBI debugfs files */
 static const struct file_operations dfs_fops = {
 	.read   = dfs_file_read,
 	.write  = dfs_file_write,
-	.open   = default_open,
+	.open	= simple_open,
 	.llseek = no_llseek,
 	.owner  = THIS_MODULE,
 };
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 941bc3c..90b9882 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -174,11 +174,7 @@
 	int err = 0, lnum, offs, total_read;
 	struct gluebi_device *gluebi;
 
-	if (len < 0 || from < 0 || from + len > mtd->size)
-		return -EINVAL;
-
 	gluebi = container_of(mtd, struct gluebi_device, mtd);
-
 	lnum = div_u64_rem(from, mtd->erasesize, &offs);
 	total_read = len;
 	while (total_read) {
@@ -218,14 +214,7 @@
 	int err = 0, lnum, offs, total_written;
 	struct gluebi_device *gluebi;
 
-	if (len < 0 || to < 0 || len + to > mtd->size)
-		return -EINVAL;
-
 	gluebi = container_of(mtd, struct gluebi_device, mtd);
-
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-
 	lnum = div_u64_rem(to, mtd->erasesize, &offs);
 
 	if (len % mtd->writesize || offs % mtd->writesize)
@@ -265,21 +254,13 @@
 	int err, i, lnum, count;
 	struct gluebi_device *gluebi;
 
-	if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
-		return -EINVAL;
-	if (instr->len < 0 || instr->addr + instr->len > mtd->size)
-		return -EINVAL;
 	if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
 		return -EINVAL;
 
 	lnum = mtd_div_by_eb(instr->addr, mtd);
 	count = mtd_div_by_eb(instr->len, mtd);
-
 	gluebi = container_of(mtd, struct gluebi_device, mtd);
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-
 	for (i = 0; i < count - 1; i++) {
 		err = ubi_leb_unmap(gluebi->desc, lnum + i);
 		if (err)
@@ -340,11 +321,11 @@
 	mtd->owner      = THIS_MODULE;
 	mtd->writesize  = di->min_io_size;
 	mtd->erasesize  = vi->usable_leb_size;
-	mtd->read       = gluebi_read;
-	mtd->write      = gluebi_write;
-	mtd->erase      = gluebi_erase;
-	mtd->get_device = gluebi_get_device;
-	mtd->put_device = gluebi_put_device;
+	mtd->_read       = gluebi_read;
+	mtd->_write      = gluebi_write;
+	mtd->_erase      = gluebi_erase;
+	mtd->_get_device = gluebi_get_device;
+	mtd->_put_device = gluebi_put_device;
 
 	/*
 	 * In case of dynamic a volume, MTD device size is just volume size. In
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0c76186..62d2409 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -891,9 +891,15 @@
 
 	switch (bond->params.fail_over_mac) {
 	case BOND_FOM_ACTIVE:
-		if (new_active)
+		if (new_active) {
 			memcpy(bond->dev->dev_addr,  new_active->dev->dev_addr,
 			       new_active->dev->addr_len);
+			write_unlock_bh(&bond->curr_slave_lock);
+			read_unlock(&bond->lock);
+			call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+			read_lock(&bond->lock);
+			write_lock_bh(&bond->curr_slave_lock);
+		}
 		break;
 	case BOND_FOM_FOLLOW:
 		/*
@@ -2028,6 +2034,9 @@
 	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 
+	if (bond->slave_cnt == 0)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+
 	bond_compute_features(bond);
 	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
 	    (old_features & NETIF_F_VLAN_CHALLENGED))
@@ -3001,7 +3010,11 @@
 					   trans_start + delta_in_ticks)) ||
 			    bond->curr_active_slave != slave) {
 				slave->link = BOND_LINK_UP;
-				bond->current_arp_slave = NULL;
+				if (bond->current_arp_slave) {
+					bond_set_slave_inactive_flags(
+						bond->current_arp_slave);
+					bond->current_arp_slave = NULL;
+				}
 
 				pr_info("%s: link status definitely up for interface %s.\n",
 					bond->dev->name, slave->dev->name);
@@ -3695,17 +3708,52 @@
 	read_unlock(&bond->lock);
 }
 
-static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
+static int bond_neigh_init(struct neighbour *n)
 {
-	struct bonding *bond = netdev_priv(dev);
+	struct bonding *bond = netdev_priv(n->dev);
 	struct slave *slave = bond->first_slave;
+	const struct net_device_ops *slave_ops;
+	struct neigh_parms parms;
+	int ret;
 
-	if (slave) {
-		const struct net_device_ops *slave_ops
-			= slave->dev->netdev_ops;
-		if (slave_ops->ndo_neigh_setup)
-			return slave_ops->ndo_neigh_setup(slave->dev, parms);
-	}
+	if (!slave)
+		return 0;
+
+	slave_ops = slave->dev->netdev_ops;
+
+	if (!slave_ops->ndo_neigh_setup)
+		return 0;
+
+	parms.neigh_setup = NULL;
+	parms.neigh_cleanup = NULL;
+	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
+	if (ret)
+		return ret;
+
+	/*
+	 * Assign slave's neigh_cleanup to neighbour in case cleanup is called
+	 * after the last slave has been detached.  Assumes that all slaves
+	 * utilize the same neigh_cleanup (true at this writing as only user
+	 * is ipoib).
+	 */
+	n->parms->neigh_cleanup = parms.neigh_cleanup;
+
+	if (!parms.neigh_setup)
+		return 0;
+
+	return parms.neigh_setup(n);
+}
+
+/*
+ * The bonding ndo_neigh_setup is called at init time before any
+ * slave exists. So we must declare proxy setup function which will
+ * be used at run time to resolve the actual slave neigh param setup.
+ */
+static int bond_neigh_setup(struct net_device *dev,
+			    struct neigh_parms *parms)
+{
+	parms->neigh_setup   = bond_neigh_init;
+
 	return 0;
 }
 
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 96391c3..b71ce9b 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -127,12 +127,6 @@
 	debugfs_remove(cfspi->dbgfs_dir);
 }
 
-static int dbgfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
 			   size_t count, loff_t *ppos)
 {
@@ -243,13 +237,13 @@
 }
 
 static const struct file_operations dbgfs_state_fops = {
-	.open = dbgfs_open,
+	.open = simple_open,
 	.read = dbgfs_state,
 	.owner = THIS_MODULE
 };
 
 static const struct file_operations dbgfs_frame_fops = {
-	.open = dbgfs_open,
+	.open = simple_open,
 	.read = dbgfs_frame,
 	.owner = THIS_MODULE
 };
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index a59cf96..f219d38 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -125,6 +125,7 @@
 #include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/if_eql.h>
+#include <linux/pkt_sched.h>
 
 #include <asm/uaccess.h>
 
@@ -143,7 +144,7 @@
 	equalizer_t *eql = (equalizer_t *) param;
 	struct list_head *this, *tmp, *head;
 
-	spin_lock_bh(&eql->queue.lock);
+	spin_lock(&eql->queue.lock);
 	head = &eql->queue.all_slaves;
 	list_for_each_safe(this, tmp, head) {
 		slave_t *slave = list_entry(this, slave_t, list);
@@ -157,7 +158,7 @@
 		}
 
 	}
-	spin_unlock_bh(&eql->queue.lock);
+	spin_unlock(&eql->queue.lock);
 
 	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
 	add_timer(&eql->timer);
@@ -341,7 +342,7 @@
 		struct net_device *slave_dev = slave->dev;
 
 		skb->dev = slave_dev;
-		skb->priority = 1;
+		skb->priority = TC_PRIO_FILLER;
 		slave->bytes_queued += skb->len;
 		dev_queue_xmit(skb);
 		dev->stats.tx_packets++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e37161f..2c9ee55 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1173,6 +1173,13 @@
 };
 
 
+struct bnx2x_prev_path_list {
+	u8 bus;
+	u8 slot;
+	u8 path;
+	struct list_head list;
+};
+
 struct bnx2x {
 	/* Fields used in the tx and intr/napi performance paths
 	 * are grouped together in the beginning of the structure
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f1f3ca6..4b05481 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1721,6 +1721,29 @@
 	} while (0)
 #endif
 
+bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
+{
+	/* build FW version dword */
+	u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+		    (BCM_5710_FW_MINOR_VERSION << 8) +
+		    (BCM_5710_FW_REVISION_VERSION << 16) +
+		    (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+	/* read loaded FW from chip */
+	u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+	DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
+
+	if (loaded_fw != my_fw) {
+		if (is_err)
+			BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
+				  loaded_fw, my_fw);
+		return false;
+	}
+
+	return true;
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1815,23 +1838,8 @@
 		}
 		if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
 		    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
-			/* build FW version dword */
-			u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
-					(BCM_5710_FW_MINOR_VERSION << 8) +
-					(BCM_5710_FW_REVISION_VERSION << 16) +
-					(BCM_5710_FW_ENGINEERING_VERSION << 24);
-
-			/* read loaded FW from chip */
-			u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
-
-			DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
-			   loaded_fw, my_fw);
-
 			/* abort nic load if version mismatch */
-			if (my_fw != loaded_fw) {
-				BNX2X_ERR("bnx2x with FW %x already loaded, "
-					  "which mismatches my %x FW. aborting",
-					  loaded_fw, my_fw);
+			if (!bnx2x_test_firmware_version(bp, true)) {
 				rc = -EBUSY;
 				LOAD_ERROR_EXIT(bp, load_error2);
 			}
@@ -1866,7 +1874,6 @@
 		 * bnx2x_periodic_task().
 		 */
 		smp_mb();
-		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 	} else
 		bp->port.pmf = 0;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 8b16338..5c27454 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -431,6 +431,9 @@
 
 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
 
+/* validate that the current fw is loaded */
+bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
+
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index cd6dfa9..b9b2633 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -25,31 +25,31 @@
 	(IRO[149].base + ((funcId) * IRO[149].m1))
 #define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[315].base + ((pfId) * IRO[315].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[317].base + ((pfId) * IRO[317].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
-#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
-#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
 	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
-#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
 	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
-#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
-#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
-#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
 	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[314].base + ((pfId) * IRO[314].m1))
+	(IRO[315].base + ((pfId) * IRO[315].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1))
+	(IRO[307].base + ((pfId) * IRO[307].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[305].base + ((pfId) * IRO[305].m1))
+	(IRO[306].base + ((pfId) * IRO[306].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[304].base + ((pfId) * IRO[304].m1))
+	(IRO[305].base + ((pfId) * IRO[305].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 	(IRO[151].base + ((funcId) * IRO[151].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -96,37 +96,37 @@
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[103].base + ((funcId) * IRO[103].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
 	(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
 	(IRO[273].base + ((pfId) * IRO[273].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
 	(IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[275].base + ((pfId) * IRO[275].m1))
 #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[270].base + ((pfId) * IRO[270].m1))
+	(IRO[271].base + ((pfId) * IRO[271].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[269].base + ((pfId) * IRO[269].m1))
+	(IRO[270].base + ((pfId) * IRO[270].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[268].base + ((pfId) * IRO[268].m1))
+	(IRO[269].base + ((pfId) * IRO[269].m1))
 #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[267].base + ((pfId) * IRO[267].m1))
+	(IRO[268].base + ((pfId) * IRO[268].m1))
 #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-	(IRO[276].base + ((pfId) * IRO[276].m1))
+	(IRO[277].base + ((pfId) * IRO[277].m1))
 #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[263].base + ((pfId) * IRO[263].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[264].base + ((pfId) * IRO[264].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[265].base + ((pfId) * IRO[265].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[267].base + ((pfId) * IRO[267].m1))
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
 	(IRO[202].base + ((pfId) * IRO[202].m1))
 #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 	(IRO[105].base + ((funcId) * IRO[105].m1))
 #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-	(IRO[216].base + ((pfId) * IRO[216].m1))
+	(IRO[217].base + ((pfId) * IRO[217].m1))
 #define TSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[104].base + ((funcId) * IRO[104].m1))
 #define USTORM_AGG_DATA_OFFSET (IRO[206].base)
@@ -140,29 +140,29 @@
 #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
 	(IRO[183].base + ((portId) * IRO[183].m1))
 #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[317].base + ((pfId) * IRO[317].m1))
+	(IRO[318].base + ((pfId) * IRO[318].m1))
 #define USTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[178].base + ((funcId) * IRO[178].m1))
 #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[281].base + ((pfId) * IRO[281].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[286].base + ((pfId) * IRO[286].m1))
-#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[283].base + ((pfId) * IRO[283].m1))
-#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[279].base + ((pfId) * IRO[279].m1))
-#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[278].base + ((pfId) * IRO[278].m1))
-#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[277].base + ((pfId) * IRO[277].m1))
-#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[280].base + ((pfId) * IRO[280].m1))
-#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+	(IRO[287].base + ((pfId) * IRO[287].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
 	(IRO[284].base + ((pfId) * IRO[284].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[279].base + ((pfId) * IRO[279].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[278].base + ((pfId) * IRO[278].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[281].base + ((pfId) * IRO[281].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
 	(IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[286].base + ((pfId) * IRO[286].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
 	(IRO[182].base + ((pfId) * IRO[182].m1))
 #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -188,39 +188,39 @@
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
-	(IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
-	(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
-	(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
-	(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
-	(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
-	(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
-#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
-#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
-#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
-#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
 	(IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+	(IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+	(IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+	(IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+	(IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+	(IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+	(IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[294].base + ((pfId) * IRO[294].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[293].base + ((pfId) * IRO[293].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[292].base + ((pfId) * IRO[292].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+	(IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
+	(IRO[291].base + ((pfId) * IRO[291].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
+	(IRO[290].base + ((pfId) * IRO[290].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
+	(IRO[289].base + ((pfId) * IRO[289].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-	(IRO[287].base + ((pfId) * IRO[287].m1))
+	(IRO[288].base + ((pfId) * IRO[288].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
 	(IRO[44].base + ((pfId) * IRO[44].m1))
 #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5d71b7d..dbff591 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1251,6 +1251,9 @@
 
 	#define DRV_MSG_CODE_LINK_STATUS_CHANGED        0x01000000
 
+	#define DRV_MSG_CODE_INITIATE_FLR               0x02000000
+	#define REQ_BC_VER_4_INITIATE_FLR               0x00070213
+
 	#define BIOS_MSG_CODE_LIC_CHALLENGE             0xff010000
 	#define BIOS_MSG_CODE_LIC_RESPONSE              0xff020000
 	#define BIOS_MSG_CODE_VIRT_MAC_PRIM             0xff030000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index beb4cdb..ad95324 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -35,7 +35,6 @@
 #define ETH_MAX_PACKET_SIZE		1500
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 #define MDIO_ACCESS_TIMEOUT		1000
-#define BMAC_CONTROL_RX_ENABLE		2
 #define WC_LANE_MAX			4
 #define I2C_SWITCH_WIDTH		2
 #define I2C_BSC0			0
@@ -1372,7 +1371,14 @@
 		pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
 			XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
 			XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
-			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+			XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+		/* Write pause and PFC registers (with force-XON set) */
+		REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
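+		/* Clear the force-XON bit before the pause and PFC registers
+		 * are written again below.
+		 */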
+		pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+
 	}
 
 	/* Write pause and PFC registers */
@@ -3649,6 +3655,33 @@
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
 		bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
 		bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+	} else if (CHIP_IS_E3(bp) &&
+		SINGLE_MEDIA_DIRECT(params)) {
+		u8 lane = bnx2x_get_warpcore_lane(phy, params);
+		u16 gp_status, gp_mask;
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
+				&gp_status);
+		gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
+			   MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
+			lane;
+		if ((gp_status & gp_mask) == gp_mask) {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+		} else {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+			ld_pause = ((ld_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+			lp_pause = ((lp_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+		}
 	} else {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_AN_DEVAD,
@@ -3699,7 +3732,23 @@
 	u16 val16 = 0, lane, bam37 = 0;
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
-
+	/* Set to default registers that may be overridden by 10G force */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_CONTROL, 0x7415);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
 	/* Disable Autoneg: re-enable it after adv is done. */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
@@ -3945,13 +3994,13 @@
 
 	} else {
 		misc1_val |= 0x9;
-		tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
-			   (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
-			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+		tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			   (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			   (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
 		tx_driver_val =
-		      ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		      ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+		       (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4369,7 +4418,7 @@
 		switch (serdes_net_if) {
 		case PORT_HW_CFG_NET_SERDES_IF_KR:
 			/* Enable KR Auto Neg */
-			if (params->loopback_mode == LOOPBACK_NONE)
+			if (params->loopback_mode != LOOPBACK_EXT)
 				bnx2x_warpcore_enable_AN_KR(phy, params, vars);
 			else {
 				DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
@@ -6167,12 +6216,14 @@
 
 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 		if (params->phy[EXT_PHY1].type ==
-			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
-			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp & 0xfff1);
-		else {
-			EMAC_WR(bp, EMAC_REG_EMAC_LED,
-				(tmp | EMAC_LED_OVERRIDE));
-		}
+			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+			tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
+				EMAC_LED_100MB_OVERRIDE |
+				EMAC_LED_10MB_OVERRIDE);
+		else
+			tmp |= EMAC_LED_OVERRIDE;
+
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
 		break;
 
 	case LED_MODE_OPER:
@@ -6227,10 +6278,15 @@
 				       hw_led_mode);
 		} else if ((params->phy[EXT_PHY1].type ==
 			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-			   (mode != LED_MODE_OPER)) {
+			   (mode == LED_MODE_ON)) {
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
 			tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 0x3);
+			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
+				EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
+			/* Break here; otherwise, it'll disable the
+			 * intended override.
+			 */
+			break;
 		} else
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 			       hw_led_mode);
@@ -6245,13 +6301,9 @@
 			       LED_BLINK_RATE_VAL_E1X_E2);
 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
 		       port*4, 1);
-		if ((params->phy[EXT_PHY1].type !=
-		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-		    (mode != LED_MODE_OPER)) {
-			tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-			EMAC_WR(bp, EMAC_REG_EMAC_LED,
-				(tmp & (~EMAC_LED_OVERRIDE)));
-		}
+		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+		EMAC_WR(bp, EMAC_REG_EMAC_LED,
+			(tmp & (~EMAC_LED_OVERRIDE)));
 
 		if (CHIP_IS_E1(bp) &&
 		    ((speed == SPEED_2500) ||
@@ -6844,6 +6896,12 @@
 			  SINGLE_MEDIA_DIRECT(params)) &&
 			 (phy_vars[active_external_phy].fault_detected == 0));
 
+	/* Update the PFC configuration in case it was changed */
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+	else
+		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
 	if (vars->link_up)
 		rc = bnx2x_update_link_up(params, vars, link_10g_plus);
 	else
@@ -8031,7 +8089,9 @@
 	netdev_err(bp->dev,  "Warning: Unqualified SFP+ module detected,"
 			      " Port %d from %s part number %s\n",
 			 params->port, vendor_name, vendor_pn);
-	phy->flags |= FLAGS_SFP_NOT_APPROVED;
+	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
+		phy->flags |= FLAGS_SFP_NOT_APPROVED;
 	return -EINVAL;
 }
 
@@ -9091,6 +9151,12 @@
 		tmp2 &= 0xFFEF;
 		bnx2x_cl45_write(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				&tmp2);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				 (tmp2 & 0x7fff));
 	}
 
 	return 0;
@@ -9271,12 +9337,11 @@
 				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 				 ((1<<5) | (1<<2)));
 	}
-	DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
-	bnx2x_8727_specific_func(phy, params, ENABLE_TX);
-	/* If transmitter is disabled, ignore false link up indication */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
-	if (val1 & (1<<15)) {
+
+	if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+		DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n");
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	} else {
 		DP(NETIF_MSG_LINK, "Tx is disabled\n");
 		return 0;
 	}
@@ -9370,8 +9435,7 @@
 
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
-		bnx2x_save_spirom_version(bp, port,
-				((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+		bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
 				phy->ver_addr);
 	} else {
 		/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
@@ -9794,6 +9858,15 @@
 				other_shmem_base_addr));
 
 	u32 shmem_base_path[2];
+
+	/* Work around for 84833 LED failure inside RESET status */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+		MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+		MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+		MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+		MDIO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+
 	shmem_base_path[0] = params->shmem_base;
 	shmem_base_path[1] = other_shmem_base_addr;
 
@@ -10104,7 +10177,7 @@
 	u8 port;
 	u16 val16;
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
@@ -10131,7 +10204,7 @@
 	u16 val;
 	u8 port;
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
@@ -12050,6 +12123,9 @@
 
 	bnx2x_emac_init(params, vars);
 
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+
 	if (params->num_phys == 0) {
 		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
 		return -EINVAL;
@@ -12129,10 +12205,10 @@
 	 * Hold it as vars low
 	 */
 	 /* clear link led */
+	bnx2x_set_mdio_clk(bp, params->chip_id, port);
 	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 
 	if (reset_ext_phy) {
-		bnx2x_set_mdio_clk(bp, params->chip_id, port);
 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 		      phy_index++) {
 			if (params->phy[phy_index].link_reset) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 7ba557a..763535e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -89,6 +89,8 @@
 #define PFC_BRB_FULL_LB_XON_THRESHOLD				250
 
 #define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+
+#define BMAC_CONTROL_RX_ENABLE		2
 /***********************************************************/
 /*                         Structs                         */
 /***********************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f7f9aa8..e077d25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -52,6 +52,7 @@
 #include <linux/prefetch.h>
 #include <linux/zlib.h>
 #include <linux/io.h>
+#include <linux/semaphore.h>
 #include <linux/stringify.h>
 #include <linux/vmalloc.h>
 
@@ -211,6 +212,10 @@
 
 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
 
+/* Global resources for unloading a previously loaded device */
+#define BNX2X_PREV_WAIT_NEEDED 1
+static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static LIST_HEAD(bnx2x_prev_list);
 /****************************************************************************
 * General service functions
 ****************************************************************************/
@@ -8812,109 +8817,371 @@
 		bnx2x_undi_int_disable_e1h(bp);
 }
 
-static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
 {
-	u32 val;
+	u32 val, base_addr, offset, mask, reset_reg;
+	bool mac_stopped = false;
+	u8 port = BP_PORT(bp);
 
-	/* possibly another driver is trying to reset the chip */
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
 
-	/* check if doorbell queue is reset */
-	if (REG_RD(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET)
-	    & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
+	if (!CHIP_IS_E3(bp)) {
+		val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+		mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
+		if ((mask & reset_reg) && val) {
+			u32 wb_data[2];
+			BNX2X_DEV_INFO("Disable bmac Rx\n");
+			base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
+						: NIG_REG_INGRESS_BMAC0_MEM;
+			offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
+						: BIGMAC_REGISTER_BMAC_CONTROL;
 
-		/*
-		 * Check if it is the UNDI driver
-		 * UNDI driver initializes CID offset for normal bell to 0x7
-		 */
-		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
-		if (val == 0x7) {
-			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-			/* save our pf_num */
-			int orig_pf_num = bp->pf_num;
-			int port;
-			u32 swap_en, swap_val, value;
+			/*
+			 * use rd/wr since we cannot use dmae. This is safe
+			 * since MCP won't access the bus due to the request
+			 * to unload, and no function on the path can be
+			 * loaded at this time.
+			 */
+			wb_data[0] = REG_RD(bp, base_addr + offset);
+			wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+			REG_WR(bp, base_addr + offset, wb_data[0]);
+			REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
 
-			/* clear the UNDI indication */
-			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+		}
+		BNX2X_DEV_INFO("Disable emac Rx\n");
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
 
-			BNX2X_DEV_INFO("UNDI is active! reset device\n");
-
-			/* try unload UNDI on port 0 */
-			bp->pf_num = 0;
-			bp->fw_seq =
-			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
-				DRV_MSG_SEQ_NUMBER_MASK);
-			reset_code = bnx2x_fw_command(bp, reset_code, 0);
-
-			/* if UNDI is loaded on the other port */
-			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
-
-				/* send "DONE" for previous unload */
-				bnx2x_fw_command(bp,
-						 DRV_MSG_CODE_UNLOAD_DONE, 0);
-
-				/* unload UNDI on port 1 */
-				bp->pf_num = 1;
-				bp->fw_seq =
-			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
-					DRV_MSG_SEQ_NUMBER_MASK);
-				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-				bnx2x_fw_command(bp, reset_code, 0);
-			}
-
-			bnx2x_undi_int_disable(bp);
-			port = BP_PORT(bp);
-
-			/* close input traffic and wait for it */
-			/* Do not rcv packets to BRB */
-			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
-					   NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
-			/* Do not direct rcv packets that are not for MCP to
-			 * the BRB */
-			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
-					   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
-			/* clear AEU */
-			REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-					   MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
-			msleep(10);
-
-			/* save NIG port swap info */
-			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-			/* reset device */
-			REG_WR(bp,
-			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-			       0xd3ffffff);
-
-			value = 0x1400;
-			if (CHIP_IS_E3(bp)) {
-				value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
-				value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
-			}
-
-			REG_WR(bp,
-			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-			       value);
-
-			/* take the NIG out of reset and restore swap values */
-			REG_WR(bp,
-			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
-			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
-			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
-			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
-
-			/* send unload done to the MCP */
-			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
-
-			/* restore our func and fw_seq */
-			bp->pf_num = orig_pf_num;
+		mac_stopped = true;
+	} else {
+		if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
+			BNX2X_DEV_INFO("Disable xmac Rx\n");
+			base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+			val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
+			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+			       val & ~(1 << 1));
+			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+			       val | (1 << 1));
+			REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+			mac_stopped = true;
+		}
+		mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+		if (mask & reset_reg) {
+			BNX2X_DEV_INFO("Disable umac Rx\n");
+			base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+			REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+			mac_stopped = true;
 		}
 	}
 
-	/* now it's safe to release the lock */
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+	if (mac_stopped)
+		msleep(20);
+
+}
+
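+/* The UNDI producer word packs the RCQ producer in bits 15:0 and the BD
+ * producer in bits 31:16; the macros below extract and rebuild it.
+ */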
+#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_RCQ(val)	((val) & 0xffff)
+#define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)
+#define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq))
+
+static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
+						 u8 inc)
+{
+	u16 rcq, bd;
+	u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
+
+	rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
+	bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
+
+	tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
+	REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+
+	BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+		       port, bd, rcq);
+}
+
+static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
+{
+	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+	if (!rc) {
+		BNX2X_ERR("MCP response failure, aborting\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
+{
+	struct bnx2x_prev_path_list *tmp_list;
+	int rc = false;
+
+	if (down_trylock(&bnx2x_prev_sem))
+		return false;
+
+	list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
+		if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
+		    bp->pdev->bus->number == tmp_list->bus &&
+		    BP_PATH(bp) == tmp_list->path) {
+			rc = true;
+			BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
+				       BP_PATH(bp));
+			break;
+		}
+	}
+
+	up(&bnx2x_prev_sem);
+
+	return rc;
+}
+
+static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
+{
+	struct bnx2x_prev_path_list *tmp_list;
+	int rc;
+
+	tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list),
+			   GFP_KERNEL);
+	if (!tmp_list) {
+		BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
+		return -ENOMEM;
+	}
+
+	tmp_list->bus = bp->pdev->bus->number;
+	tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
+	tmp_list->path = BP_PATH(bp);
+
+	rc = down_interruptible(&bnx2x_prev_sem);
+	if (rc) {
+		BNX2X_ERR("Received %d when tried to take lock\n", rc);
+		kfree(tmp_list);
+	} else {
+		BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
+				BP_PATH(bp));
+		list_add(&tmp_list->list, &bnx2x_prev_list);
+		up(&bnx2x_prev_sem);
+	}
+
+	return rc;
+}
+
+static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
+{
+	int pos;
+	u32 cap;
+	struct pci_dev *dev = bp->pdev;
+
+	pos = pci_pcie_cap(dev);
+	if (!pos)
+		return false;
+
+	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
+	if (!(cap & PCI_EXP_DEVCAP_FLR))
+		return false;
+
+	return true;
+}
+
+static int __devinit bnx2x_do_flr(struct bnx2x *bp)
+{
+	int i, pos;
+	u16 status;
+	struct pci_dev *dev = bp->pdev;
+
+	/* probe the capability first */
+	if (!bnx2x_can_flr(bp))
+		return -ENOTTY;
+
+	pos = pci_pcie_cap(dev);
+	if (!pos)
+		return -ENOTTY;
+
+	/* Wait for the Transaction Pending bit to clear */
+	for (i = 0; i < 4; i++) {
+		if (i)
+			msleep((1 << (i - 1)) * 100);
+
+		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+		if (!(status & PCI_EXP_DEVSTA_TRPND))
+			goto clear;
+	}
+
+	dev_err(&dev->dev,
+		"transaction is not cleared; proceeding with reset anyway\n");
+
+clear:
+	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+			  bp->common.bc_ver);
+		return -EINVAL;
+	}
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
+
+	return 0;
+}
+
+static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
+{
+	int rc;
+
+	BNX2X_DEV_INFO("Uncommon unload Flow\n");
+
+	/* Test if previous unload process was already finished for this path */
+	if (bnx2x_prev_is_path_marked(bp))
+		return bnx2x_prev_mcp_done(bp);
+
+	/* If the function has FLR capabilities and the existing FW version
+	 * matches the one required, then FLR will be sufficient to clean
+	 * any residue left by the previous driver.
+	 */
+	if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
+		return bnx2x_do_flr(bp);
+
+	/* Close the MCP request, return failure */
+	rc = bnx2x_prev_mcp_done(bp);
+	if (!rc)
+		rc = BNX2X_PREV_WAIT_NEEDED;
+
+	return rc;
+}
+
+static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
+{
+	u32 reset_reg, tmp_reg = 0, rc;
+	/* It is possible that a previous function received the 'common'
+	 * answer but has not loaded yet, creating a scenario in which
+	 * multiple functions receive 'common' on the same path.
+	 */
+	BNX2X_DEV_INFO("Common unload Flow\n");
+
+	if (bnx2x_prev_is_path_marked(bp))
+		return bnx2x_prev_mcp_done(bp);
+
+	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+
+	/* Reset should be performed after BRB is emptied */
+	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
+		u32 timer_count = 1000;
+		bool prev_undi = false;
+
+		/* Close the MAC Rx to prevent BRB from filling up */
+		bnx2x_prev_unload_close_mac(bp);
+
+		/* Check if the UNDI driver was previously loaded.
+		 * UNDI initializes the CID offset for the normal bell to 0x7.
+		 */
+		reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+		if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
+			tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+			if (tmp_reg == 0x7) {
+				BNX2X_DEV_INFO("UNDI previously loaded\n");
+				prev_undi = true;
+				/* clear the UNDI indication */
+				REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+			}
+		}
+		/* wait until BRB is empty */
+		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+		while (timer_count) {
+			u32 prev_brb = tmp_reg;
+
+			tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+			if (!tmp_reg)
+				break;
+
+			BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
+
+			/* reset timer as long as BRB actually gets emptied */
+			if (prev_brb > tmp_reg)
+				timer_count = 1000;
+			else
+				timer_count--;
+
+			/* If UNDI resides in memory, manually increment it */
+			if (prev_undi)
+				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
+
+			udelay(10);
+		}
+
+		if (!timer_count)
+			BNX2X_ERR("Failed to empty BRB, hope for the best\n");
+
+	}
+
+	/* No packets are in the pipeline, path is ready for reset */
+	bnx2x_reset_common(bp);
+
+	rc = bnx2x_prev_mark_path(bp);
+	if (rc) {
+		bnx2x_prev_mcp_done(bp);
+		return rc;
+	}
+
+	return bnx2x_prev_mcp_done(bp);
+}
+
+static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
+{
+	int time_counter = 10;
+	u32 rc, fw, hw_lock_reg, hw_lock_val;
+	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
+
+	/* Release previously held locks */
+	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
+		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
+		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
+
+	hw_lock_val = (REG_RD(bp, hw_lock_reg));
+	if (hw_lock_val) {
+		if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+			BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
+			REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+			       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
+		}
+
+		BNX2X_DEV_INFO("Release Previously held hw lock\n");
+		REG_WR(bp, hw_lock_reg, 0xffffffff);
+	} else
+		BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
+
+	if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
+		BNX2X_DEV_INFO("Release previously held alr\n");
+		REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
+	}
+
+
+	do {
+		/* Lock MCP using an unload request */
+		fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
+		if (!fw) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EBUSY;
+			break;
+		}
+
+		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+			rc = bnx2x_prev_unload_common(bp);
+			break;
+		}
+
+		/* non-common reply from MCP might require looping */
+		rc = bnx2x_prev_unload_uncommon(bp);
+		if (rc != BNX2X_PREV_WAIT_NEEDED)
+			break;
+
+		msleep(20);
+	} while (--time_counter);
+
+	if (!time_counter || rc) {
+		BNX2X_ERR("Failed unloading previous driver, aborting\n");
+		rc = -EBUSY;
+	}
+
+	BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
+
+	return rc;
 }
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
@@ -10100,8 +10367,16 @@
 	func = BP_FUNC(bp);
 
 	/* need to reset chip if undi was active */
-	if (!BP_NOMCP(bp))
-		bnx2x_undi_unload(bp);
+	if (!BP_NOMCP(bp)) {
+		/* init fw_seq */
+		bp->fw_seq =
+			SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+							DRV_MSG_SEQ_NUMBER_MASK;
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+		bnx2x_prev_unload(bp);
+	}
+
 
 	if (CHIP_REV_IS_FPGA(bp))
 		dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -11431,9 +11706,18 @@
 
 static void __exit bnx2x_cleanup(void)
 {
+	struct list_head *pos, *q;
 	pci_unregister_driver(&bnx2x_pci_driver);
 
 	destroy_workqueue(bnx2x_wq);
+
+	/* Free globally allocated resources */
+	list_for_each_safe(pos, q, &bnx2x_prev_list) {
+		struct bnx2x_prev_path_list *tmp =
+			list_entry(pos, struct bnx2x_prev_path_list, list);
+		list_del(pos);
+		kfree(tmp);
+	}
 }
 
 void bnx2x_notify_link_changed(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fd7fb45..c25803b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -987,6 +987,7 @@
  * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
 #define IGU_REG_WRITE_DONE_PENDING				 0x130480
 #define MCP_A_REG_MCPR_SCRATCH					 0x3a0000
+#define MCP_REG_MCPR_ACCESS_LOCK				 0x8009c
 #define MCP_REG_MCPR_CPU_PROGRAM_COUNTER			 0x8501c
 #define MCP_REG_MCPR_GP_INPUTS					 0x800c0
 #define MCP_REG_MCPR_GP_OENABLE					 0x800c8
@@ -1686,6 +1687,7 @@
    [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
    Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; 16]
    rst_pxp_rq_rd_wr; 31:17] reserved */
+#define MISC_REG_RESET_REG_1					 0xa580
 #define MISC_REG_RESET_REG_2					 0xa590
 /* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
    shared with the driver resides */
@@ -5352,6 +5354,7 @@
 #define XMAC_CTRL_REG_TX_EN					 (0x1<<0)
 #define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN				 (0x1<<18)
 #define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN				 (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON			 (0x1<<1)
 #define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN			 (0x1<<0)
 #define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN			 (0x1<<3)
 #define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN				 (0x1<<4)
@@ -5606,6 +5609,7 @@
 /* [RC 32] Parity register #0 read clear */
 #define XSEM_REG_XSEM_PRTY_STS_CLR_0				 0x280128
 #define XSEM_REG_XSEM_PRTY_STS_CLR_1				 0x280138
+#define MCPR_ACCESS_LOCK_LOCK					 (1L<<31)
 #define MCPR_NVM_ACCESS_ENABLE_EN				 (1L<<0)
 #define MCPR_NVM_ACCESS_ENABLE_WR_EN				 (1L<<1)
 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE				 (0xffffffL<<0)
@@ -5732,6 +5736,7 @@
 #define MISC_REGISTERS_GPIO_PORT_SHIFT				 4
 #define MISC_REGISTERS_GPIO_SET_POS				 8
 #define MISC_REGISTERS_RESET_REG_1_CLEAR			 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_BRB1			 (0x1<<0)
 #define MISC_REGISTERS_RESET_REG_1_RST_DORQ			 (0x1<<19)
 #define MISC_REGISTERS_RESET_REG_1_RST_HC			 (0x1<<29)
 #define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
@@ -6816,10 +6821,13 @@
 
 #define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL	0x0020
 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL	0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G	0x40
 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS	0xffe1
 #define MDIO_AN_REG_8481_LEGACY_AN_ADV		0xffe4
 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION	0xffe6
 #define MDIO_AN_REG_8481_1000T_CTRL		0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL	0xfff0
+#define MDIO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF	0x0008
 #define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW	0xfff5
 #define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS	0xfff7
 #define MDIO_AN_REG_8481_AUX_CTRL		0xfff8
@@ -6939,6 +6947,10 @@
 #define MDIO_WC_REG_GP2_STATUS_GP_2_2			0x81d2
 #define MDIO_WC_REG_GP2_STATUS_GP_2_3			0x81d3
 #define MDIO_WC_REG_GP2_STATUS_GP_2_4			0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
 #define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP		0x81EE
 #define MDIO_WC_REG_UC_INFO_B1_VERSION			0x81F0
 #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE		0x81F2
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 3f52fad..5135733 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3847,7 +3847,7 @@
 			continue;
 
 		/* If we've got here we are going to find a free entry */
-		for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0;
+		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
 
 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4e4bb38..062ac33 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2778,7 +2778,9 @@
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
-	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+	     !tp->pci_fn))
 		return;
 
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05ff076a..b126b98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2000,13 +2000,6 @@
 /*
  * debugfs support
  */
-
-static int mem_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
 			loff_t *ppos)
 {
@@ -2050,7 +2043,7 @@
 
 static const struct file_operations mem_debugfs_fops = {
 	.owner   = THIS_MODULE,
-	.open    = mem_open,
+	.open    = simple_open,
 	.read    = mem_read,
 	.llseek  = default_llseek,
 };
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9eb8159..f7f0bf5 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -356,13 +356,13 @@
 
 		if (prop)
 			tbiaddr = *prop;
-	}
 
-	if (tbiaddr == -1) {
-		err = -EBUSY;
-		goto err_free_irqs;
-	} else {
-		out_be32(tbipa, tbiaddr);
+		if (tbiaddr == -1) {
+			err = -EBUSY;
+			goto err_free_irqs;
+		} else {
+			out_be32(tbipa, tbiaddr);
+		}
 	}
 
 	err = of_mdiobus_register(new_bus, np);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4e3cd2f..17a46e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3945,6 +3945,8 @@
 		}
 
 	if (max_speed == SPEED_1000) {
+		unsigned int snums = qe_get_num_of_snums();
+
 		/* configure muram FIFOs for gigabit operation */
 		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
 		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
@@ -3954,11 +3956,11 @@
 		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
 		ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
 
-		/* If QE's snum number is 46 which means we need to support
+		/* If QE's snum number is 46/76 which means we need to support
 		 * 4 UECs at 1000Base-T simultaneously, we need to allocate
 		 * more Threads to Rx.
 		 */
-		if (qe_get_num_of_snums() == 46)
+		if ((snums == 76) || (snums == 46))
 			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
 		else
 			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 0e9aec8..4348b6f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -164,6 +164,8 @@
 static bool e1000_vlan_used(struct e1000_adapter *adapter);
 static void e1000_vlan_mode(struct net_device *netdev,
 			    netdev_features_t features);
+static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+				     bool filter_on);
 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
@@ -215,7 +217,8 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
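+/* debug == -1 selects DEFAULT_MSG_ENABLE via netif_msg_init() */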
+static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
@@ -979,7 +982,7 @@
 	adapter = netdev_priv(netdev);
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
-	adapter->msg_enable = (1 << debug) - 1;
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 	adapter->bars = bars;
 	adapter->need_ioport = need_ioport;
 
@@ -1214,7 +1217,7 @@
 	if (err)
 		goto err_register;
 
-	e1000_vlan_mode(netdev, netdev->features);
+	e1000_vlan_filter_on_off(adapter, false);
 
 	/* print bus type/speed/width info */
 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
@@ -4770,6 +4773,22 @@
 	return false;
 }
 
+static void __e1000_vlan_mode(struct e1000_adapter *adapter,
+			      netdev_features_t features)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+	if (features & NETIF_F_HW_VLAN_RX) {
+		/* enable VLAN tag insert/strip */
+		ctrl |= E1000_CTRL_VME;
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl &= ~E1000_CTRL_VME;
+	}
+	ew32(CTRL, ctrl);
+}
 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 				     bool filter_on)
 {
@@ -4779,6 +4798,7 @@
 	if (!test_bit(__E1000_DOWN, &adapter->flags))
 		e1000_irq_disable(adapter);
 
+	__e1000_vlan_mode(adapter, adapter->netdev->features);
 	if (filter_on) {
 		/* enable VLAN receive filtering */
 		rctl = er32(RCTL);
@@ -4799,24 +4819,14 @@
 }
 
 static void e1000_vlan_mode(struct net_device *netdev,
-	netdev_features_t features)
+			    netdev_features_t features)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-	u32 ctrl;
 
 	if (!test_bit(__E1000_DOWN, &adapter->flags))
 		e1000_irq_disable(adapter);
 
-	ctrl = er32(CTRL);
-	if (features & NETIF_F_HW_VLAN_RX) {
-		/* enable VLAN tag insert/strip */
-		ctrl |= E1000_CTRL_VME;
-	} else {
-		/* disable VLAN tag insert/strip */
-		ctrl &= ~E1000_CTRL_VME;
-	}
-	ew32(CTRL, ctrl);
+	__e1000_vlan_mode(adapter, features);
 
 	if (!test_bit(__E1000_DOWN, &adapter->flags))
 		e1000_irq_enable(adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 86cdd47..b83897f 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -161,6 +161,12 @@
 /* Time to wait before putting the device into D3 if there's no link (in ms). */
 #define LINK_TIMEOUT		100
 
+/*
+ * Count for polling the __E1000_RESETTING condition every 10-20 msec.
+ * Experimentation has shown the reset can take approximately 210msec.
+ */
+#define E1000_CHECK_RESET_COUNT		25
+
 #define DEFAULT_RDTR			0
 #define DEFAULT_RADV			8
 #define BURST_RDTR			0x20
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7152eb1..19ab215 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -60,6 +60,11 @@
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 static const struct e1000_info *e1000_info_tbl[] = {
@@ -1054,6 +1059,13 @@
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
 		/* execute the writes immediately */
 		e1e_flush();
+		/*
+		 * Due to rare timing issues, write to TIDV again to ensure
+		 * the write is successful
+		 */
+		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+		/* execute the writes immediately */
+		e1e_flush();
 		adapter->tx_hang_recheck = true;
 		return;
 	}
@@ -3611,6 +3623,16 @@
 
 	/* execute the writes immediately */
 	e1e_flush();
+
+	/*
+	 * due to rare timing issues, write to TIDV/RDTR again to ensure the
+	 * write is successful
+	 */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
 }
 
 static void e1000e_update_stats(struct e1000_adapter *adapter);
@@ -3963,6 +3985,10 @@
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev = adapter->pdev;
+	int count = E1000_CHECK_RESET_COUNT;
+
+	while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+		usleep_range(10000, 20000);
 
 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
 
@@ -5467,6 +5493,11 @@
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev)) {
+		int count = E1000_CHECK_RESET_COUNT;
+
+		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+			usleep_range(10000, 20000);
+
 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
 		e1000e_down(adapter);
 		e1000_free_irq(adapter);
@@ -6172,7 +6203,7 @@
 	adapter->hw.adapter = adapter;
 	adapter->hw.mac.type = ei->mac;
 	adapter->max_hw_frame_size = ei->max_hw_frame_size;
-	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c490241..5ec3159 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -238,6 +238,11 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 struct igb_reg_info {
 	u32 ofs;
 	char *name;
@@ -1893,7 +1898,7 @@
 	adapter->pdev = pdev;
 	hw = &adapter->hw;
 	hw->back = adapter;
-	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 217c143..d61ca2a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -55,6 +55,11 @@
 static const char igbvf_copyright[] =
 		  "Copyright (c) 2009 - 2012 Intel Corporation.";
 
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 static int igbvf_poll(struct napi_struct *napi, int budget);
 static void igbvf_reset(struct igbvf_adapter *);
 static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
@@ -2649,7 +2654,7 @@
 	adapter->flags = ei->flags;
 	adapter->hw.back = adapter;
 	adapter->hw.mac.type = ei->mac;
-	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	/* PCI config space info */
 
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 82aaa79..5fce363 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -134,8 +134,8 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
@@ -442,7 +442,7 @@
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 	adapter->hw.back = adapter;
-	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
 	if (!adapter->hw.hw_addr) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 80e26ff..74e1921 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -544,7 +544,7 @@
 	u16 action;
 };
 
-enum ixbge_state_t {
+enum ixgbe_state_t {
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index dde65f9..652e4b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,62 +44,94 @@
 #define DCB_NO_HW_CHG   1  /* DCB configuration did not change */
 #define DCB_HW_CHG      2  /* DCB configuration changed, no reset */
 
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-                       struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
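+/* Copy the DCB configuration from scfg to dcfg and return a bitmask of
+ * BIT_PG_TX, BIT_PG_RX, BIT_PFC and BIT_APP_UPCHG for the sections that
+ * actually changed (0 if nothing changed).
+ */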
+int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
+		       struct ixgbe_dcb_config *dcfg, int tc_max)
 {
-	struct tc_configuration *src_tc_cfg = NULL;
-	struct tc_configuration *dst_tc_cfg = NULL;
-	int i;
+	struct tc_configuration *src = NULL;
+	struct tc_configuration *dst = NULL;
+	int i, j;
+	int tx = DCB_TX_CONFIG;
+	int rx = DCB_RX_CONFIG;
+	int changes = 0;
 
-	if (!src_dcb_cfg || !dst_dcb_cfg)
-		return -EINVAL;
+	if (!scfg || !dcfg)
+		return changes;
 
 	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
-		src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
-		dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
+		src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
+		dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0];
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
-				src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
+		if (dst->path[tx].prio_type != src->path[tx].prio_type) {
+			dst->path[tx].prio_type = src->path[tx].prio_type;
+			changes |= BIT_PG_TX;
+		}
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
-				src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
+		if (dst->path[tx].bwg_id != src->path[tx].bwg_id) {
+			dst->path[tx].bwg_id = src->path[tx].bwg_id;
+			changes |= BIT_PG_TX;
+		}
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
-				src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
+		if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) {
+			dst->path[tx].bwg_percent = src->path[tx].bwg_percent;
+			changes |= BIT_PG_TX;
+		}
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
-				src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
+		if (dst->path[tx].up_to_tc_bitmap !=
+				src->path[tx].up_to_tc_bitmap) {
+			dst->path[tx].up_to_tc_bitmap =
+				src->path[tx].up_to_tc_bitmap;
+			changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
-				src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
+		if (dst->path[rx].prio_type != src->path[rx].prio_type) {
+			dst->path[rx].prio_type = src->path[rx].prio_type;
+			changes |= BIT_PG_RX;
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
-				src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
+		if (dst->path[rx].bwg_id != src->path[rx].bwg_id) {
+			dst->path[rx].bwg_id = src->path[rx].bwg_id;
+			changes |= BIT_PG_RX;
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
-				src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
+		if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) {
+			dst->path[rx].bwg_percent = src->path[rx].bwg_percent;
+			changes |= BIT_PG_RX;
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
-				src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
+		if (dst->path[rx].up_to_tc_bitmap !=
+				src->path[rx].up_to_tc_bitmap) {
+			dst->path[rx].up_to_tc_bitmap =
+				src->path[rx].up_to_tc_bitmap;
+			changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
+		}
 	}
 
 	for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
-		dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
-			[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-				[DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
-		dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
-			[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-				[DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
+		j = i - DCB_PG_ATTR_BW_ID_0;
+		if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) {
+			dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j];
+			changes |= BIT_PG_TX;
+		}
+		if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) {
+			dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j];
+			changes |= BIT_PG_RX;
+		}
 	}
 
 	for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
-		dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
-			src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
+		j = i - DCB_PFC_UP_ATTR_0;
+		if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) {
+			dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc;
+			changes |= BIT_PFC;
+		}
 	}
 
-	dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
+	if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
+		dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
+		changes |= BIT_PFC;
+	}
 
-	return 0;
+	return changes;
 }
 
 static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
@@ -179,20 +211,6 @@
 	if (up_map != DCB_ATTR_VALUE_UNDEFINED)
 		adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
 			up_map;
-
-	if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
-		adapter->dcb_set_bitmap |= BIT_PG_TX;
-
-	if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-		adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -201,10 +219,6 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
-
-	if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[0][bwg_id])
-		adapter->dcb_set_bitmap |= BIT_PG_TX;
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -223,20 +237,6 @@
 	if (up_map != DCB_ATTR_VALUE_UNDEFINED)
 		adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
 			up_map;
-
-	if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
-		adapter->dcb_set_bitmap |= BIT_PG_RX;
-
-	if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
-		adapter->dcb_set_bitmap |= BIT_PFC;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -245,10 +245,6 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
-
-	if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[1][bwg_id])
-		adapter->dcb_set_bitmap |= BIT_PG_RX;
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -298,10 +294,8 @@
 
 	adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
 	if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
-	    adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
-		adapter->dcb_set_bitmap |= BIT_PFC;
+	    adapter->dcb_cfg.tc_config[priority].dcb_pfc)
 		adapter->temp_dcb_cfg.pfc_mode_enable = true;
-	}
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -336,7 +330,8 @@
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int ret, i;
+	int ret = DCB_NO_HW_CHG;
+	int i;
 #ifdef IXGBE_FCOE
 	struct dcb_app app = {
 			      .selector = DCB_APP_IDTYPE_ETHTYPE,
@@ -355,12 +350,13 @@
 
 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-		return 1;
+		return ret;
 
-	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-				 MAX_TRAFFIC_CLASS);
-	if (ret)
-		return DCB_NO_HW_CHG;
+	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
+						      &adapter->dcb_cfg,
+						      MAX_TRAFFIC_CLASS);
+	if (!adapter->dcb_set_bitmap)
+		return ret;
 
 	if (adapter->dcb_cfg.pfc_mode_enable) {
 		switch (adapter->hw.mac.type) {
@@ -420,6 +416,8 @@
 
 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
 			netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
+
+		ret = DCB_HW_CHG_RST;
 	}
 
 	if (adapter->dcb_set_bitmap & BIT_PFC) {
@@ -430,7 +428,8 @@
 				     DCB_TX_CONFIG, prio_tc);
 		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
 		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
-		ret = DCB_HW_CHG;
+		if (ret != DCB_HW_CHG_RST)
+			ret = DCB_HW_CHG;
 	}
 
 	if (adapter->dcb_cfg.pfc_mode_enable)
@@ -531,9 +530,6 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	adapter->temp_dcb_cfg.pfc_mode_enable = state;
-	if (adapter->temp_dcb_cfg.pfc_mode_enable !=
-		adapter->dcb_cfg.pfc_mode_enable)
-		adapter->dcb_set_bitmap |= BIT_PFC;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 398fc22..3e26b1f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,8 +63,8 @@
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
 #define MAJ 3
-#define MIN 6
-#define BUILD 7
+#define MIN 8
+#define BUILD 21
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 	__stringify(BUILD) "-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
@@ -141,13 +141,16 @@
 MODULE_PARM_DESC(allow_unsupported_sfp,
 		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
 
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-
 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
 {
 	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -6834,7 +6837,7 @@
 	adapter->pdev = pdev;
 	hw = &adapter->hw;
 	hw->back = adapter;
-	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 			      pci_resource_len(pdev, 0));
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 581c659..307611a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -91,7 +91,10 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /* forward decls */
 static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
@@ -3367,7 +3370,7 @@
 	adapter->pdev = pdev;
 	hw = &adapter->hw;
 	hw->back = adapter;
-	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	/*
 	 * call save state here in standalone driver because it relies on
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 423a1a2..c9b504e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1767,13 +1767,14 @@
 
 	sky2_hw_up(sky2);
 
+	/* Enable interrupts from phy/mac for port */
+	imask = sky2_read32(hw, B0_IMSK);
+
 	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
 	    hw->chip_id == CHIP_ID_YUKON_PRM ||
 	    hw->chip_id == CHIP_ID_YUKON_OP_2)
 		imask |= Y2_IS_PHY_QLNK;	/* enable PHY Quick Link */
 
-	/* Enable interrupts from phy/mac for port */
-	imask = sky2_read32(hw, B0_IMSK);
 	imask |= portirq_msk[port];
 	sky2_write32(hw, B0_IMSK, imask);
 	sky2_read32(hw, B0_IMSK);
@@ -2468,6 +2469,17 @@
 	return err;
 }
 
+static inline bool needs_copy(const struct rx_ring_info *re,
+			      unsigned length)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/* Some architectures need the IP header to be aligned */
+	if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
+		return true;
+#endif
+	return length < copybreak;
+}
+
 /* For small just reuse existing skb for next receive */
 static struct sk_buff *receive_copy(struct sky2_port *sky2,
 				    const struct rx_ring_info *re,
@@ -2598,7 +2610,7 @@
 		goto error;
 
 okay:
-	if (length < copybreak)
+	if (needs_copy(re, length))
 		skb = receive_copy(sky2, re, length);
 	else
 		skb = receive_new(sky2, re, length);
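
The sky2 hunks above add needs_copy() so that, besides frames shorter than copybreak, frames whose IP header would land on an unaligned address are also bounced through receive_copy() on architectures without efficient unaligned access. A minimal standalone sketch of that decision (illustrative constants, not the driver code):

	#include <stdbool.h>
	#include <stdint.h>

	#define ETH_HLEN	14
	#define COPYBREAK	128	/* assumed threshold; tunable in the driver */

	/* Copy the frame when it is small or when the IP header is misaligned. */
	static bool rx_needs_copy(uintptr_t data_addr, unsigned int length)
	{
		if ((data_addr + ETH_HLEN) % sizeof(uint32_t))
			return true;
		return length < COPYBREAK;
	}
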
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9e2b911..d69fee4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -83,8 +83,9 @@
 
 #define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
 
-#define MLX4_EN_ALLOC_ORDER	2
-#define MLX4_EN_ALLOC_SIZE	(PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+/* Use the maximum between 16384 and a single page */
+#define MLX4_EN_ALLOC_SIZE	PAGE_ALIGN(16384)
+#define MLX4_EN_ALLOC_ORDER	get_order(MLX4_EN_ALLOC_SIZE)
 
 #define MLX4_EN_MAX_LRO_DESCRIPTORS	32
 
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 6944424..6dfc26d 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1441,7 +1441,7 @@
 	}
 #endif
 	if (!is_valid_ether_addr(ndev->dev_addr))
-		dev_hw_addr_random(ndev, ndev->dev_addr);
+		eth_hw_addr_random(ndev);
 
 	/* Reset the ethernet controller */
 	__lpc_eth_reset(pldat);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7b23554..f545093 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5810,7 +5810,10 @@
 
 	rtl_pll_power_up(tp);
 
+	rtl_lock_work(tp);
+	napi_enable(&tp->napi);
 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	rtl_unlock_work(tp);
 
 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 9755b49..3fb2355 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -7,7 +7,8 @@
 	depends on SUPERH && \
 		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
 		 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
-		 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
+		 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
+		 CPU_SUBTYPE_SH7757)
 	select CRC32
 	select NET_CORE
 	select MII
@@ -16,4 +17,4 @@
 	---help---
 	  Renesas SuperH Ethernet device driver.
 	  This driver supporting CPUs are:
-		- SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
+		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8615961..d63e09b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,8 @@
 /*
  *  SuperH Ethernet device driver
  *
- *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- *  Copyright (C) 2008-2009 Renesas Solutions Corp.
+ *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
+ *  Copyright (C) 2008-2012 Renesas Solutions Corp.
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/clk.h>
 #include <linux/sh_eth.h>
 
 #include "sh_eth.h"
@@ -279,8 +280,9 @@
 		return &sh_eth_my_cpu_data;
 }
 
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
 #define SH_ETH_HAS_TSU	1
+static void sh_eth_reset_hw_crc(struct net_device *ndev);
 static void sh_eth_chip_reset(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -314,6 +316,9 @@
 	sh_eth_write(ndev, 0x0, RDFAR);
 	sh_eth_write(ndev, 0x0, RDFXR);
 	sh_eth_write(ndev, 0x0, RDFFR);
+
+	/* Reset HW CRC register */
+	sh_eth_reset_hw_crc(ndev);
 }
 
 static void sh_eth_set_duplex(struct net_device *ndev)
@@ -370,8 +375,17 @@
 	.no_trimd	= 1,
 	.no_ade		= 1,
 	.tsu		= 1,
+#if defined(CONFIG_CPU_SUBTYPE_SH7734)
+	.hw_crc     = 1,
+#endif
 };
 
+static void sh_eth_reset_hw_crc(struct net_device *ndev)
+{
+	if (sh_eth_my_cpu_data.hw_crc)
+		sh_eth_write(ndev, 0x0, CSMR);
+}
+
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 #define SH_ETH_RESET_DEFAULT	1
 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -790,7 +804,7 @@
 	/* all sh_eth int mask */
 	sh_eth_write(ndev, 0, EESIPR);
 
-#if defined(__LITTLE_ENDIAN__)
+#if defined(__LITTLE_ENDIAN)
 	if (mdp->cd->hw_swap)
 		sh_eth_write(ndev, EDMR_EL, EDMR);
 	else
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57dc262..0fa14afc 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,8 +1,8 @@
 /*
  *  SuperH Ethernet device driver
  *
- *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- *  Copyright (C) 2008-2011 Renesas Solutions Corp.
+ *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
+ *  Copyright (C) 2008-2012 Renesas Solutions Corp.
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@
 	CEECR,
 	MAFCR,
 	RTRATE,
+	CSMR,
+	RMII_MII,
 
 	/* TSU Absolute address */
 	ARSTR,
@@ -172,6 +174,7 @@
 	[RMCR]	= 0x0458,
 	[RPADIR]	= 0x0460,
 	[FCFTR]	= 0x0468,
+	[CSMR] = 0x04E4,
 
 	[ECMR]	= 0x0500,
 	[ECSR]	= 0x0510,
@@ -200,6 +203,7 @@
 	[CERCR]	= 0x0768,
 	[CEECR]	= 0x0770,
 	[MAFCR]	= 0x0778,
+	[RMII_MII] =  0x0790,
 
 	[ARSTR]	= 0x0000,
 	[TSU_CTRST]	= 0x0004,
@@ -377,7 +381,7 @@
 /*
  * Register's bits
  */
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
 /* EDSR */
 enum EDSR_BIT {
 	EDSR_ENT = 0x01, EDSR_ENR = 0x02,
@@ -689,7 +693,7 @@
  */
 struct sh_eth_txdesc {
 	u32 status;		/* TD0 */
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN)
 	u16 pad0;		/* TD1 */
 	u16 buffer_length;	/* TD1 */
 #else
@@ -706,7 +710,7 @@
  */
 struct sh_eth_rxdesc {
 	u32 status;		/* RD0 */
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN)
 	u16 frame_length;	/* RD1 */
 	u16 buffer_length;	/* RD1 */
 #else
@@ -751,6 +755,7 @@
 	unsigned rpadir:1;		/* E-DMAC have RPADIR */
 	unsigned no_trimd:1;		/* E-DMAC DO NOT have TRIMD */
 	unsigned no_ade:1;	/* E-DMAC DO NOT have ADE bit in EESR */
+	unsigned hw_crc:1;	/* E-DMAC have CSMR */
 };
 
 struct sh_eth_private {
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 26b3c23..7581483 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -193,7 +193,7 @@
 		erase->state = MTD_ERASE_DONE;
 	} else {
 		erase->state = MTD_ERASE_FAILED;
-		erase->fail_addr = 0xffffffff;
+		erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 	}
 	mtd_erase_callback(erase);
 	return rc;
@@ -263,10 +263,10 @@
 		part->mtd.owner = THIS_MODULE;
 		part->mtd.priv = efx_mtd;
 		part->mtd.name = part->name;
-		part->mtd.erase = efx_mtd_erase;
-		part->mtd.read = efx_mtd->ops->read;
-		part->mtd.write = efx_mtd->ops->write;
-		part->mtd.sync = efx_mtd_sync;
+		part->mtd._erase = efx_mtd_erase;
+		part->mtd._read = efx_mtd->ops->read;
+		part->mtd._write = efx_mtd->ops->write;
+		part->mtd._sync = efx_mtd_sync;
 
 		if (mtd_device_register(&part->mtd, NULL, 0))
 			goto fail;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e85ffbd..48d56da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1737,10 +1737,12 @@
 	struct mac_device_info *mac;
 
 	/* Identify the MAC HW device */
-	if (priv->plat->has_gmac)
+	if (priv->plat->has_gmac) {
+		priv->dev->priv_flags |= IFF_UNICAST_FLT;
 		mac = dwmac1000_setup(priv->ioaddr);
-	else
+	} else {
 		mac = dwmac100_setup(priv->ioaddr);
+	}
 	if (!mac)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 261356c..3d501ec 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -342,6 +342,21 @@
 }
 
 
+static void tile_net_return_credit(struct tile_net_cpu *info)
+{
+	struct tile_netio_queue *queue = &info->queue;
+	netio_queue_user_impl_t *qup = &queue->__user_part;
+
+	/* Return four credits after every fourth packet. */
+	if (--qup->__receive_credit_remaining == 0) {
+		u32 interval = qup->__receive_credit_interval;
+		qup->__receive_credit_remaining = interval;
+		__netio_fastio_return_credits(qup->__fastio_index, interval);
+	}
+}
+
+
+
 /*
  * Provide a linux buffer to LIPP.
  */
@@ -433,7 +448,7 @@
 	struct sk_buff **skb_ptr;
 
 	/* Request 96 extra bytes for alignment purposes. */
-	skb = netdev_alloc_skb(info->napi->dev, len + padding);
+	skb = netdev_alloc_skb(info->napi.dev, len + padding);
 	if (skb == NULL)
 		return false;
 
@@ -864,19 +879,11 @@
 
 		stats->rx_packets++;
 		stats->rx_bytes += len;
-
-		if (small)
-			info->num_needed_small_buffers++;
-		else
-			info->num_needed_large_buffers++;
 	}
 
-	/* Return four credits after every fourth packet. */
-	if (--qup->__receive_credit_remaining == 0) {
-		u32 interval = qup->__receive_credit_interval;
-		qup->__receive_credit_remaining = interval;
-		__netio_fastio_return_credits(qup->__fastio_index, interval);
-	}
+	/* ISSUE: It would be nice to defer this until the packet has */
+	/* actually been processed. */
+	tile_net_return_credit(info);
 
 	/* Consume this packet. */
 	qup->__packet_receive_read = index2;
@@ -1543,7 +1550,7 @@
 
 	/* Drain all the LIPP buffers. */
 	while (true) {
-		int buffer;
+		unsigned int buffer;
 
 		/* NOTE: This should never fail. */
 		if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
@@ -1707,7 +1714,7 @@
 		if (!hash_default) {
 			void *va = pfn_to_kaddr(pfn) + f->page_offset;
 			BUG_ON(PageHighMem(skb_frag_page(f)));
-			finv_buffer_remote(va, f->size, 0);
+			finv_buffer_remote(va, skb_frag_size(f), 0);
 		}
 
 		cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1735,8 +1742,8 @@
  * Sometimes, if "sendfile()" requires copying, we will be called with
  * "data" containing the header and payload, with "frags" being empty.
  *
- * In theory, "sh->nr_frags" could be 3, but in practice, it seems
- * that this will never actually happen.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments, which must be handled carefully in LEPP.
  *
  * See "emulate_large_send_offload()" for some reference code, which
  * does not handle checksumming.
@@ -1844,10 +1851,8 @@
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -1861,6 +1866,7 @@
 	cmd_tail = eq->cmd_tail;
 
 	/* Prepare to advance, detecting full queue. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	cmd_next = cmd_tail + cmd_size;
 	if (cmd_tail < cmd_head && cmd_next >= cmd_head)
 		goto busy;
@@ -2023,10 +2029,8 @@
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -2040,6 +2044,7 @@
 	cmd_tail = eq->cmd_tail;
 
 	/* Copy the commands, or fail. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	for (i = 0; i < num_frags; i++) {
 
 		/* Prepare to advance, detecting full queue. */
@@ -2261,6 +2266,23 @@
 	return 0;
 }
 
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	disable_percpu_irq(priv->intr_id);
+	tile_net_handle_ingress_interrupt(priv->intr_id, dev);
+	enable_percpu_irq(priv->intr_id, 0);
+}
+#endif
+
+
 static const struct net_device_ops tile_net_ops = {
 	.ndo_open = tile_net_open,
 	.ndo_stop = tile_net_stop,
@@ -2269,7 +2291,10 @@
 	.ndo_get_stats = tile_net_get_stats,
 	.ndo_change_mtu = tile_net_change_mtu,
 	.ndo_tx_timeout = tile_net_tx_timeout,
-	.ndo_set_mac_address = tile_net_set_mac_address
+	.ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = tile_net_netpoll,
+#endif
 };
 
 
@@ -2409,7 +2434,7 @@
  */
 static int tile_net_init_module(void)
 {
-	pr_info("Tilera IPP Net Driver\n");
+	pr_info("Tilera Network Driver\n");
 
 	tile_net_devs[0] = tile_net_dev_init("xgbe0");
 	tile_net_devs[1] = tile_net_dev_init("xgbe1");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 39b8cf3..fcfa01f 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -503,30 +503,32 @@
 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
 static void rhine_restart_tx(struct net_device *dev);
 
-static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high)
+static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
 {
 	void __iomem *ioaddr = rp->base;
 	int i;
 
 	for (i = 0; i < 1024; i++) {
-		if (high ^ !!(ioread8(ioaddr + reg) & mask))
+		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
+
+		if (low ^ has_mask_bits)
 			break;
 		udelay(10);
 	}
 	if (i > 64) {
 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
-			  "count: %04d\n", high ? "high" : "low", reg, mask, i);
+			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
 	}
 }
 
 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
 {
-	rhine_wait_bit(rp, reg, mask, true);
+	rhine_wait_bit(rp, reg, mask, false);
 }
 
 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
 {
-	rhine_wait_bit(rp, reg, mask, false);
+	rhine_wait_bit(rp, reg, mask, true);
 }
 
 static u32 rhine_get_events(struct rhine_private *rp)
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index a0d1913..e250675 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -147,7 +147,7 @@
 	struct dma_async_tx_descriptor *desc;
 	struct dma_chan *chan = buf->chan;
 
-	desc = chan->device->device_prep_slave_sg(chan, &buf->sg, 1, dir,
+	desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (desc) {
 		desc->callback = cb;
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 0856e1b..f08c85a 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -162,7 +162,8 @@
 	/* Enable Auto Power Saving mode */
 	c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
 	c |= IP101A_G_APS_ON;
-	return c;
+
+	return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
 }
 
 static int ip175c_read_status(struct phy_device *phydev)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 159da29..33f8c51 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@
 /* Prototypes. */
 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 			struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static int ppp_xmit_process(struct ppp *ppp);
 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
 static void ppp_push(struct ppp *ppp);
 static void ppp_channel_push(struct channel *pch);
@@ -968,9 +968,9 @@
 	proto = npindex_to_proto[npi];
 	put_unaligned_be16(proto, pp);
 
-	netif_stop_queue(dev);
 	skb_queue_tail(&ppp->file.xq, skb);
-	ppp_xmit_process(ppp);
+	if (!ppp_xmit_process(ppp))
+		netif_stop_queue(dev);
 	return NETDEV_TX_OK;
 
  outf:
@@ -1048,10 +1048,11 @@
  * Called to do any work queued up on the transmit side
  * that can now be done.
  */
-static void
+static int
 ppp_xmit_process(struct ppp *ppp)
 {
 	struct sk_buff *skb;
+	int ret = 0;
 
 	ppp_xmit_lock(ppp);
 	if (!ppp->closing) {
@@ -1061,10 +1062,13 @@
 			ppp_send_frame(ppp, skb);
 		/* If there's no work left to do, tell the core net
 		   code that we can accept some more. */
-		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
 			netif_wake_queue(ppp->dev);
+			ret = 1;
+		}
 	}
 	ppp_xmit_unlock(ppp);
+	return ret;
 }
 
 static inline struct sk_buff *
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a57f057..91d2588 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -375,8 +375,8 @@
 	struct net_device *ndev = rio_get_drvdata(rdev);
 	struct rionet_peer *peer, *tmp;
 
-	free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
-					__fls(sizeof(void *)) + 4 : 0);
+	free_pages((unsigned long)rionet_active, get_order(sizeof(void *) *
+			RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size)));
 	unregister_netdev(ndev);
 	free_netdev(ndev);
 
@@ -432,15 +432,16 @@
 	int rc = 0;
 	struct rionet_private *rnet;
 	u16 device_id;
+	const size_t rionet_active_bytes = sizeof(void *) *
+				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
 
 	rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
-			mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
+			get_order(rionet_active_bytes));
 	if (!rionet_active) {
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset((void *)rionet_active, 0, sizeof(void *) *
-				RIO_MAX_ROUTE_ENTRIES(mport->sys_size));
+	memset((void *)rionet_active, 0, rionet_active_bytes);
 
 	/* Set up private area */
 	rnet = netdev_priv(ndev);
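
The rionet hunks above size the rionet_active table explicitly and let get_order() pick the allocation order instead of the hand-rolled "__fls(sizeof(void *)) + 4" expression, so the allocation and the matching free_pages() now agree on the order. A standalone sketch of the order computation for non-zero sizes (PAGE_SIZE assumed to be 4096 purely for illustration):

	#include <stddef.h>

	#define PAGE_SIZE	4096UL

	/* Smallest order such that (PAGE_SIZE << order) covers "size" bytes. */
	static unsigned int page_order(size_t size)
	{
		unsigned int order = 0;

		while ((PAGE_SIZE << order) < size)
			order++;
		return order;
	}
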
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 3886b30..3e41b00 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -165,13 +165,13 @@
 				memcpy(skb_put(skb, 1), page_address(page), 1);
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 						page, 1, req->actual_length,
-						req->actual_length);
+						PAGE_SIZE);
 				page = NULL;
 			}
 		} else {
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 					page, 0, req->actual_length,
-					req->actual_length);
+					PAGE_SIZE);
 			page = NULL;
 		}
 		if (req->actual_length < PAGE_SIZE)
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 439690b..685a4e2 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -93,6 +93,7 @@
 	/* no jumbogram (16K) support for now */
 
 	dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 
 	return 0;
 }
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6dda2fe..d363b31 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -85,32 +85,6 @@
 #define INT_CRERR_CNT		0x06
 #define INT_COL_CNT		0x07
 
-/* Transmit status register errors */
-#define TSR_ECOL		(1<<5)
-#define TSR_LCOL		(1<<4)
-#define TSR_LOSS_CRS		(1<<3)
-#define TSR_JBR			(1<<2)
-#define TSR_ERRORS		(TSR_ECOL | TSR_LCOL | TSR_LOSS_CRS | TSR_JBR)
-/* Receive status register errors */
-#define RSR_CRC			(1<<2)
-#define RSR_FAE			(1<<1)
-#define RSR_ERRORS		(RSR_CRC | RSR_FAE)
-
-/* Media status register definitions */
-#define MSR_DUPLEX		(1<<4)
-#define MSR_SPEED		(1<<3)
-#define MSR_LINK		(1<<2)
-
-/* Interrupt pipe data */
-#define INT_TSR			0x00
-#define INT_RSR			0x01
-#define INT_MSR			0x02
-#define INT_WAKSR		0x03
-#define INT_TXOK_CNT		0x04
-#define INT_RXLOST_CNT		0x05
-#define INT_CRERR_CNT		0x06
-#define INT_COL_CNT		0x07
-
 
 #define	RTL8150_MTU		1540
 #define	RTL8150_TX_TIMEOUT	(HZ)
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index c3197ce..34db195 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -337,6 +337,11 @@
 	.driver_info = ZAURUS_PXA_INFO,
 },
 {
+	/* Motorola Rokr E6 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6027, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &bogus_mdlm_info,
+}, {
 	/* Motorola MOTOMAGX phones */
 	USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 019da01..4de2760 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -625,12 +625,13 @@
 
 	/* This can happen with OOM and indirect buffers. */
 	if (unlikely(capacity < 0)) {
-		if (net_ratelimit()) {
-			if (likely(capacity == -ENOMEM)) {
+		if (likely(capacity == -ENOMEM)) {
+			if (net_ratelimit()) {
 				dev_warn(&dev->dev,
 					 "TX queue failure: out of memory\n");
 			} else {
-				dev->stats.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
+			if (net_ratelimit())
 				dev_warn(&dev->dev,
 					 "Unexpected TX queue failure: %d\n",
 					 capacity);
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 423eb26..d70ede7 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -290,8 +290,8 @@
 	  Frame Relay or X.25/LAPB.
 
 	  If you want the module to be automatically loaded when the interface
-	  is referenced then you should add "alias hdlcX farsync" to
-	  /etc/modprobe.conf for each interface, where X is 0, 1, 2, ..., or
+	  is referenced then you should add "alias hdlcX farsync" to a file
+	  in /etc/modprobe.d/ for each interface, where X is 0, 1, 2, ..., or
 	  simply use "alias hdlc* farsync" to indicate all of them.
 
 	  To compile this driver as a module, choose M here: the
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 129ba36..4b66ab1 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -53,17 +53,6 @@
 				   &fops_netdev_queue_stopped);
 }
 
-
-/*
- * inode->i_private has the @data argument to debugfs_create_file()
- */
-static
-int i2400m_stats_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 /*
  * We don't allow partial reads of this file, as then the reader would
  * get weirdly confused data as it is updated.
@@ -117,7 +106,7 @@
 static
 const struct file_operations i2400m_rx_stats_fops = {
 	.owner =	THIS_MODULE,
-	.open =		i2400m_stats_open,
+	.open =		simple_open,
 	.read =		i2400m_rx_stats_read,
 	.write =	i2400m_rx_stats_write,
 	.llseek =	default_llseek,
@@ -170,7 +159,7 @@
 static
 const struct file_operations i2400m_tx_stats_fops = {
 	.owner =	THIS_MODULE,
-	.open =		i2400m_stats_open,
+	.open =		simple_open,
 	.read =		i2400m_tx_stats_read,
 	.write =	i2400m_tx_stats_write,
 	.llseek =	default_llseek,
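
This conversion, repeated in the ath, b43, iwlegacy and iwlwifi debugfs code below, drops each driver's hand-rolled open callback in favour of the generic simple_open() helper. In effect the helper does what every one of those callbacks did (kernel-context sketch, builds only against <linux/fs.h>):

	#include <linux/fs.h>

	/* Equivalent effect: stash the debugfs private pointer for read/write. */
	static int example_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}
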
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 63e4b70..1d76ae8 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -597,7 +597,8 @@
 	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
 
 	strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
-	strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
+	strncpy(info->fw_version,
+	        i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
 	if (net_dev->dev.parent)
 		strncpy(info->bus_info, dev_name(net_dev->dev.parent),
 			sizeof(info->bus_info) - 1);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 2c1b8b6..29b1e03 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -339,6 +339,23 @@
 	return result;
 }
 
+static void i2400mu_get_drvinfo(struct net_device *net_dev,
+                                struct ethtool_drvinfo *info)
+{
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	struct usb_device *udev = i2400mu->usb_dev;
+
+	strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
+	strncpy(info->fw_version,
+	        i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
+	usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops i2400mu_ethtool_ops = {
+	.get_drvinfo = i2400mu_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
 
 static
 void i2400mu_netdev_setup(struct net_device *net_dev)
@@ -347,6 +364,7 @@
 	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
 	i2400mu_init(i2400mu);
 	i2400m_netdev_setup(net_dev);
+	net_dev->ethtool_ops = &i2400mu_ethtool_ops;
 }
 
 
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 8c5ce8b..e5e8f45 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -71,13 +71,6 @@
 module_param_named(debug, ath5k_debug, uint, 0);
 
 
-static int ath5k_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-
 /* debugfs: registers */
 
 struct reg {
@@ -265,7 +258,7 @@
 static const struct file_operations fops_beacon = {
 	.read = read_file_beacon,
 	.write = write_file_beacon,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -285,7 +278,7 @@
 
 static const struct file_operations fops_reset = {
 	.write = write_file_reset,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = noop_llseek,
 };
@@ -365,7 +358,7 @@
 static const struct file_operations fops_debug = {
 	.read = read_file_debug,
 	.write = write_file_debug,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -477,7 +470,7 @@
 static const struct file_operations fops_antenna = {
 	.read = read_file_antenna,
 	.write = write_file_antenna,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -532,7 +525,7 @@
 
 static const struct file_operations fops_misc = {
 	.read = read_file_misc,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 };
 
@@ -647,7 +640,7 @@
 static const struct file_operations fops_frameerrors = {
 	.read = read_file_frameerrors,
 	.write = write_file_frameerrors,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -810,7 +803,7 @@
 static const struct file_operations fops_ani = {
 	.read = read_file_ani,
 	.write = write_file_ani,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -881,7 +874,7 @@
 static const struct file_operations fops_queue = {
 	.read = read_file_queue,
 	.write = write_file_queue,
-	.open = ath5k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 552adb3..d01403a 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -217,12 +217,6 @@
 		   target->credit_info->cur_free_credits);
 }
 
-static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
 {
 	switch (war) {
@@ -263,7 +257,7 @@
 
 static const struct file_operations fops_war_stats = {
 	.read = read_file_war_stats,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -488,7 +482,7 @@
 }
 
 static const struct file_operations fops_fwlog_mask = {
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.read = ath6kl_fwlog_mask_read,
 	.write = ath6kl_fwlog_mask_write,
 	.owner = THIS_MODULE,
@@ -634,7 +628,7 @@
 
 static const struct file_operations fops_tgt_stats = {
 	.read = read_file_tgt_stats,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -699,7 +693,7 @@
 
 static const struct file_operations fops_credit_dist_stats = {
 	.read = read_file_credit_dist_stats,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -802,7 +796,7 @@
 }
 
 static const struct file_operations fops_endpoint_stats = {
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.read = ath6kl_endpoint_stats_read,
 	.write = ath6kl_endpoint_stats_write,
 	.owner = THIS_MODULE,
@@ -875,7 +869,7 @@
 static const struct file_operations fops_diag_reg_read = {
 	.read = ath6kl_regread_read,
 	.write = ath6kl_regread_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -999,7 +993,7 @@
 static const struct file_operations fops_lrssi_roam_threshold = {
 	.read = ath6kl_lrssi_roam_read,
 	.write = ath6kl_lrssi_roam_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1061,7 +1055,7 @@
 static const struct file_operations fops_diag_reg_write = {
 	.read = ath6kl_regwrite_read,
 	.write = ath6kl_regwrite_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1166,7 +1160,7 @@
 
 static const struct file_operations fops_roam_table = {
 	.read = ath6kl_roam_table_read,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1204,7 +1198,7 @@
 
 static const struct file_operations fops_force_roam = {
 	.write = ath6kl_force_roam_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1244,7 +1238,7 @@
 
 static const struct file_operations fops_roam_mode = {
 	.write = ath6kl_roam_mode_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1286,7 +1280,7 @@
 }
 
 static const struct file_operations fops_keepalive = {
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.read = ath6kl_keepalive_read,
 	.write = ath6kl_keepalive_write,
 	.owner = THIS_MODULE,
@@ -1331,7 +1325,7 @@
 }
 
 static const struct file_operations fops_disconnect_timeout = {
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.read = ath6kl_disconnect_timeout_read,
 	.write = ath6kl_disconnect_timeout_write,
 	.owner = THIS_MODULE,
@@ -1512,7 +1506,7 @@
 
 static const struct file_operations fops_create_qos = {
 	.write = ath6kl_create_qos_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1560,7 +1554,7 @@
 
 static const struct file_operations fops_delete_qos = {
 	.write = ath6kl_delete_qos_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1593,7 +1587,7 @@
 
 static const struct file_operations fops_bgscan_int = {
 	.write = ath6kl_bgscan_int_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1651,7 +1645,7 @@
 static const struct file_operations fops_listen_int = {
 	.read = ath6kl_listen_int_read,
 	.write = ath6kl_listen_int_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1711,7 +1705,7 @@
 
 static const struct file_operations fops_power_params = {
 	.write = ath6kl_power_params_write,
-	.open = ath6kl_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 2f4b48e..e5cceb0 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -20,7 +20,6 @@
 
 /* Common calibration code */
 
-#define ATH9K_NF_TOO_HIGH	-60
 
 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
 {
@@ -346,10 +345,10 @@
 			"NF calibrated [%s] [chain %d] is %d\n",
 			(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
 
-		if (nf[i] > ATH9K_NF_TOO_HIGH) {
+		if (nf[i] > limit->max) {
 			ath_dbg(common, CALIBRATE,
 				"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
-				i, nf[i], ATH9K_NF_TOO_HIGH);
+				i, nf[i], limit->max);
 			nf[i] = limit->max;
 		} else if (nf[i] < limit->min) {
 			ath_dbg(common, CALIBRATE,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 35d1c8e..ff47b32 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -26,11 +26,6 @@
 #define REG_READ_D(_ah, _reg) \
 	ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
 
-static int ath9k_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
 
 static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
 				      size_t count, loff_t *ppos)
@@ -83,7 +78,7 @@
 static const struct file_operations fops_debug = {
 	.read = read_file_debug,
 	.write = write_file_debug,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -129,7 +124,7 @@
 static const struct file_operations fops_tx_chainmask = {
 	.read = read_file_tx_chainmask,
 	.write = write_file_tx_chainmask,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -172,7 +167,7 @@
 static const struct file_operations fops_rx_chainmask = {
 	.read = read_file_rx_chainmask,
 	.write = write_file_rx_chainmask,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -223,7 +218,7 @@
 static const struct file_operations fops_disable_ani = {
 	.read = read_file_disable_ani,
 	.write = write_file_disable_ani,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -324,7 +319,7 @@
 
 static const struct file_operations fops_dma = {
 	.read = read_file_dma,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -446,7 +441,7 @@
 
 static const struct file_operations fops_interrupt = {
 	.read = read_file_interrupt,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -852,28 +847,28 @@
 
 static const struct file_operations fops_xmit = {
 	.read = read_file_xmit,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
 
 static const struct file_operations fops_stations = {
 	.read = read_file_stations,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
 
 static const struct file_operations fops_misc = {
 	.read = read_file_misc,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
 
 static const struct file_operations fops_reset = {
 	.read = read_file_reset,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1016,7 +1011,7 @@
 
 static const struct file_operations fops_recv = {
 	.read = read_file_recv,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1055,7 +1050,7 @@
 static const struct file_operations fops_regidx = {
 	.read = read_file_regidx,
 	.write = write_file_regidx,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1102,7 +1097,7 @@
 static const struct file_operations fops_regval = {
 	.read = read_file_regval,
 	.write = write_file_regval,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1191,7 +1186,7 @@
 
 static const struct file_operations fops_dump_nfcal = {
 	.read = read_file_dump_nfcal,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1219,7 +1214,7 @@
 
 static const struct file_operations fops_base_eeprom = {
 	.read = read_file_base_eeprom,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -1247,7 +1242,7 @@
 
 static const struct file_operations fops_modal_eeprom = {
 	.read = read_file_modal_eeprom,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 106d031..4364c10 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -60,16 +60,9 @@
 	return retval;
 }
 
-static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static const struct file_operations fops_dfs_stats = {
 	.read = read_file_dfs,
-	.open = ath9k_dfs_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index d3ff33c..3035deb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -16,12 +16,6 @@
 
 #include "htc.h"
 
-static int ath9k_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
 				       size_t count, loff_t *ppos)
 {
@@ -75,7 +69,7 @@
 
 static const struct file_operations fops_tgt_int_stats = {
 	.read = read_file_tgt_int_stats,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -145,7 +139,7 @@
 
 static const struct file_operations fops_tgt_tx_stats = {
 	.read = read_file_tgt_tx_stats,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -191,7 +185,7 @@
 
 static const struct file_operations fops_tgt_rx_stats = {
 	.read = read_file_tgt_rx_stats,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -243,7 +237,7 @@
 
 static const struct file_operations fops_xmit = {
 	.read = read_file_xmit,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -364,7 +358,7 @@
 
 static const struct file_operations fops_recv = {
 	.read = read_file_recv,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -399,7 +393,7 @@
 
 static const struct file_operations fops_slot = {
 	.read = read_file_slot,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -446,7 +440,7 @@
 
 static const struct file_operations fops_queue = {
 	.read = read_file_queue,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -487,7 +481,7 @@
 static const struct file_operations fops_debug = {
 	.read = read_file_debug,
 	.write = write_file_debug,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -636,7 +630,7 @@
 
 static const struct file_operations fops_base_eeprom = {
 	.read = read_file_base_eeprom,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
@@ -917,7 +911,7 @@
 
 static const struct file_operations fops_modal_eeprom = {
 	.read = read_file_modal_eeprom,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 60159f4..cb00645 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -680,7 +680,7 @@
 	hw->queues = 4;
 	hw->max_rates = 4;
 	hw->channel_change_time = 5000;
-	hw->max_listen_interval = 10;
+	hw->max_listen_interval = 1;
 	hw->max_rate_tries = 10;
 	hw->sta_data_size = sizeof(struct ath_node);
 	hw->vif_data_size = sizeof(struct ath_vif);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3879485..2504ab0 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,15 +118,13 @@
 	if (--sc->ps_usecount != 0)
 		goto unlock;
 
-	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK)
-		goto unlock;
-
-	if (sc->ps_idle)
+	if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
 		mode = ATH9K_PM_FULL_SLEEP;
 	else if (sc->ps_enabled &&
 		 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
 			      PS_WAIT_FOR_CAB |
-			      PS_WAIT_FOR_PSPOLL_DATA)))
+			      PS_WAIT_FOR_PSPOLL_DATA |
+			      PS_WAIT_FOR_TX_ACK)))
 		mode = ATH9K_PM_NETWORK_SLEEP;
 	else
 		goto unlock;
@@ -640,7 +638,7 @@
 	an->sta = sta;
 	an->vif = vif;
 
-	if (sta->ht_cap.ht_supported) {
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
 		ath_tx_node_init(sc, an);
 		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
 				     sta->ht_cap.ampdu_factor);
@@ -659,7 +657,7 @@
 	an->sta = NULL;
 #endif
 
-	if (sta->ht_cap.ht_supported)
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
 		ath_tx_node_cleanup(sc, an);
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 4f84849..08bb455 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1480,12 +1480,6 @@
 
 #ifdef CONFIG_ATH9K_DEBUGFS
 
-static int ath9k_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
 {
@@ -1553,7 +1547,7 @@
 
 static const struct file_operations fops_rcstat = {
 	.read = read_file_rcstat,
-	.open = ath9k_debugfs_open,
+	.open = simple_open,
 	.owner = THIS_MODULE
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index f4ae3ba..1c4583c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1913,13 +1913,13 @@
 		if (sc->rx.frag) {
 			int space = skb->len - skb_tailroom(hdr_skb);
 
-			sc->rx.frag = NULL;
-
 			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
 				dev_kfree_skb(skb);
 				goto requeue_drop_frag;
 			}
 
+			sc->rx.frag = NULL;
+
 			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
 						  skb->len);
 			dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 3c16422..93fe600 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -48,11 +48,6 @@
 #define ADD(buf, off, max, fmt, args...)				\
 	off += snprintf(&buf[off], max - off, fmt, ##args);
 
-static int carl9170_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
 
 struct carl9170_debugfs_fops {
 	unsigned int read_bufsize;
@@ -178,7 +173,7 @@
 	.attr = _attr,							\
 	.req_dev_state = _dstate,					\
 	.fops = {							\
-		.open	= carl9170_debugfs_open,			\
+		.open	= simple_open,					\
 		.read	= carl9170_debugfs_read,			\
 		.write	= carl9170_debugfs_write,			\
 		.owner	= THIS_MODULE					\
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index e751fde..e807bd9 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -500,12 +500,6 @@
 
 #undef fappend
 
-static int b43_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
 {
@@ -624,7 +618,7 @@
 		.read	= _read,				\
 		.write	= _write,				\
 		.fops	= {					\
-			.open	= b43_debugfs_open,		\
+			.open	= simple_open,			\
 			.read	= b43_debugfs_read,		\
 			.write	= b43_debugfs_write,		\
 			.llseek = generic_file_llseek,		\
diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
index 5e28ad0..1965edb 100644
--- a/drivers/net/wireless/b43legacy/debugfs.c
+++ b/drivers/net/wireless/b43legacy/debugfs.c
@@ -197,12 +197,6 @@
 
 #undef fappend
 
-static int b43legacy_debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
 {
@@ -331,7 +325,7 @@
 		.read	= _read,				\
 		.write	= _write,				\
 		.fops	= {					\
-			.open	= b43legacy_debugfs_open,		\
+			.open	= simple_open,				\
 			.read	= b43legacy_debugfs_read,		\
 			.write	= b43legacy_debugfs_write,		\
 			.llseek = generic_file_llseek,			\
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 4fcdac6..2b02257 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11507,9 +11507,9 @@
 			rc = -ENOMEM;
 			goto out;
 		}
-		/* translate geo->bg to a_band.channels */
+		/* translate geo->a to a_band.channels */
 		for (i = 0; i < geo->a_channels; i++) {
-			a_band->channels[i].band = IEEE80211_BAND_2GHZ;
+			a_band->channels[i].band = IEEE80211_BAND_5GHZ;
 			a_band->channels[i].center_freq = geo->a[i].freq;
 			a_band->channels[i].hw_value = geo->a[i].channel;
 			a_band->channels[i].max_power = geo->a[i].max_power;
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 0c12093..faec404 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -2673,8 +2673,6 @@
 
 	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
 		mutex_lock(&il->mutex);
-		/* FIXME: vif can be dereferenced */
-		il->vif = NULL;
 		il->is_open = 0;
 		mutex_unlock(&il->mutex);
 		il3945_down(il);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 70bee1a..4b10157 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -821,12 +821,6 @@
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-static int
-il3945_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
 
 static ssize_t
 il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
@@ -862,7 +856,7 @@
 
 static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
 	.read = il3945_sta_dbgfs_stats_table_read,
-	.open = il3945_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 17f1c68..c46275a 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5652,8 +5652,6 @@
 
 	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
 		mutex_lock(&il->mutex);
-		/* FIXME: do we dereference vif without mutex locked ? */
-		il->vif = NULL;
 		il->is_open = 0;
 
 		__il4965_down(il);
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index d7e2856..11ab124 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2518,12 +2518,6 @@
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-static int
-il4965_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
 
 static void
 il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
@@ -2695,7 +2689,7 @@
 static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
 	.write = il4965_rs_sta_dbgfs_scale_table_write,
 	.read = il4965_rs_sta_dbgfs_scale_table_read,
-	.open = il4965_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -2740,7 +2734,7 @@
 
 static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
 	.read = il4965_rs_sta_dbgfs_stats_table_read,
-	.open = il4965_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -2768,7 +2762,7 @@
 
 static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
 	.read = il4965_rs_sta_dbgfs_rate_scale_data_read,
-	.open = il4965_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e5ac047..eaf24945 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4508,6 +4508,7 @@
 {
 	struct il_priv *il = hw->priv;
 	int err;
+	bool reset;
 
 	mutex_lock(&il->mutex);
 	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
@@ -4518,7 +4519,12 @@
 		goto out;
 	}
 
-	if (il->vif) {
+	/*
+	 * We do not support multiple virtual interfaces, but on hardware reset
+	 * we have to add the same interface again.
+	 */
+	reset = (il->vif == vif);
+	if (il->vif && !reset) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4528,8 +4534,11 @@
 
 	err = il_set_mode(il);
 	if (err) {
-		il->vif = NULL;
-		il->iw_mode = NL80211_IFTYPE_STATION;
+		IL_WARN("Fail to set mode %d\n", vif->type);
+		if (!reset) {
+			il->vif = NULL;
+			il->iw_mode = NL80211_IFTYPE_STATION;
+		}
 	}
 
 out:
@@ -5279,9 +5288,9 @@
 		D_MAC80211("BSSID %pM\n", bss_conf->bssid);
 
 		/*
-		 * If there is currently a HW scan going on in the
-		 * background then we need to cancel it else the RXON
-		 * below/in post_associate will fail.
+		 * If there is currently a HW scan going on in the background,
+		 * then we need to cancel it, otherwise sometimes we are not
+		 * able to authenticate (FIXME: why ?)
 		 */
 		if (il_scan_cancel_timeout(il, 100)) {
 			D_MAC80211("leave - scan abort failed\n");
@@ -5290,14 +5299,10 @@
 		}
 
 		/* mac80211 only sets assoc when in STATION mode */
-		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-			memcpy(il->staging.bssid_addr, bss_conf->bssid,
-			       ETH_ALEN);
+		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
 
-			/* currently needed in a few places */
-			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
-		} else
-			il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+		/* FIXME: currently needed in a few places */
+		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
 	}
 
 	/*
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index 2298491..eff2650 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -160,18 +160,12 @@
 					const char __user *user_buf,    \
 					size_t count, loff_t *ppos);
 
-static int
-il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
 
 #define DEBUGFS_READ_FILE_OPS(name)				\
 	DEBUGFS_READ_FUNC(name);				\
 static const struct file_operations il_dbgfs_##name##_ops = {	\
 	.read = il_dbgfs_##name##_read,				\
-	.open = il_dbgfs_open_file_generic,			\
+	.open = simple_open,					\
 	.llseek = generic_file_llseek,				\
 };
 
@@ -179,7 +173,7 @@
 	DEBUGFS_WRITE_FUNC(name);				\
 static const struct file_operations il_dbgfs_##name##_ops = {	\
 	.write = il_dbgfs_##name##_write,			\
-	.open = il_dbgfs_open_file_generic,			\
+	.open = simple_open,					\
 	.llseek = generic_file_llseek,				\
 };
 
@@ -189,7 +183,7 @@
 static const struct file_operations il_dbgfs_##name##_ops = {	\
 	.write = il_dbgfs_##name##_write,			\
 	.read = il_dbgfs_##name##_read,				\
-	.open = il_dbgfs_open_file_generic,			\
+	.open = simple_open,					\
 	.llseek = generic_file_llseek,				\
 };
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 53f8c51..7e590b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -3083,11 +3083,6 @@
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-static int open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
 			     u32 *rate_n_flags, int index)
 {
@@ -3226,7 +3221,7 @@
 static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
 	.write = rs_sta_dbgfs_scale_table_write,
 	.read = rs_sta_dbgfs_scale_table_read,
-	.open = open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
@@ -3269,7 +3264,7 @@
 
 static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
 	.read = rs_sta_dbgfs_stats_table_read,
-	.open = open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -3295,7 +3290,7 @@
 
 static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
 	.read = rs_sta_dbgfs_rate_scale_data_read,
-	.open = open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index b7b1c04..2bbaebd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -84,17 +84,11 @@
 					size_t count, loff_t *ppos);
 
 
-static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 #define DEBUGFS_READ_FILE_OPS(name)                                     \
 	DEBUGFS_READ_FUNC(name);                                        \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
 	.read = iwl_dbgfs_##name##_read,                       		\
-	.open = iwl_dbgfs_open_file_generic,                    	\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
@@ -102,7 +96,7 @@
 	DEBUGFS_WRITE_FUNC(name);                                       \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
 	.write = iwl_dbgfs_##name##_write,                              \
-	.open = iwl_dbgfs_open_file_generic,                    	\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
@@ -113,7 +107,7 @@
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
 	.write = iwl_dbgfs_##name##_write,                              \
 	.read = iwl_dbgfs_##name##_read,                                \
-	.open = iwl_dbgfs_open_file_generic,                            \
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index b4f796c..4d7b30d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1898,17 +1898,11 @@
 					size_t count, loff_t *ppos);
 
 
-static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 #define DEBUGFS_READ_FILE_OPS(name)					\
 	DEBUGFS_READ_FUNC(name);					\
 static const struct file_operations iwl_dbgfs_##name##_ops = {		\
 	.read = iwl_dbgfs_##name##_read,				\
-	.open = iwl_dbgfs_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
@@ -1916,7 +1910,7 @@
 	DEBUGFS_WRITE_FUNC(name);                                       \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
 	.write = iwl_dbgfs_##name##_write,                              \
-	.open = iwl_dbgfs_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
@@ -1926,7 +1920,7 @@
 static const struct file_operations iwl_dbgfs_##name##_ops = {		\
 	.write = iwl_dbgfs_##name##_write,				\
 	.read = iwl_dbgfs_##name##_read,				\
-	.open = iwl_dbgfs_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index 87eef57..b6199d1 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -99,12 +99,6 @@
 			iwm_debugfs_u32_read, iwm_debugfs_dbg_modules_write,
 			"%llu\n");
 
-static int iwm_generic_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 
 static ssize_t iwm_debugfs_txq_read(struct file *filp, char __user *buffer,
 				   size_t count, loff_t *ppos)
@@ -401,28 +395,28 @@
 
 static const struct file_operations iwm_debugfs_txq_fops = {
 	.owner =	THIS_MODULE,
-	.open =		iwm_generic_open,
+	.open =		simple_open,
 	.read =		iwm_debugfs_txq_read,
 	.llseek =	default_llseek,
 };
 
 static const struct file_operations iwm_debugfs_tx_credit_fops = {
 	.owner =	THIS_MODULE,
-	.open =		iwm_generic_open,
+	.open =		simple_open,
 	.read =		iwm_debugfs_tx_credit_read,
 	.llseek =	default_llseek,
 };
 
 static const struct file_operations iwm_debugfs_rx_ticket_fops = {
 	.owner =	THIS_MODULE,
-	.open =		iwm_generic_open,
+	.open =		simple_open,
 	.read =		iwm_debugfs_rx_ticket_read,
 	.llseek =	default_llseek,
 };
 
 static const struct file_operations iwm_debugfs_fw_err_fops = {
 	.owner =	THIS_MODULE,
-	.open =		iwm_generic_open,
+	.open =		simple_open,
 	.read =		iwm_debugfs_fw_err_read,
 	.llseek =	default_llseek,
 };
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 764b40d..0042f20 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -264,13 +264,6 @@
 	return ret;
 }
 
-/* debugfs hooks */
-static int iwm_debugfs_sdio_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t iwm_debugfs_sdio_read(struct file *filp, char __user *buffer,
 				     size_t count, loff_t *ppos)
 {
@@ -363,7 +356,7 @@
 
 static const struct file_operations iwm_debugfs_sdio_fops = {
 	.owner =	THIS_MODULE,
-	.open =		iwm_debugfs_sdio_open,
+	.open =		simple_open,
 	.read =		iwm_debugfs_sdio_read,
 	.llseek =	default_llseek,
 };
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index c192671..a06cc28 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -21,12 +21,6 @@
 static void lbs_debug_init(struct lbs_private *priv);
 #endif
 
-static int open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t write_file_dummy(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
 {
@@ -696,7 +690,7 @@
 
 #define FOPS(fread, fwrite) { \
 	.owner = THIS_MODULE, \
-	.open = open_file_generic, \
+	.open = simple_open, \
 	.read = (fread), \
 	.write = (fwrite), \
 	.llseek = generic_file_llseek, \
@@ -962,7 +956,7 @@
 
 static const struct file_operations lbs_debug_fops = {
 	.owner = THIS_MODULE,
-	.open = open_file_generic,
+	.open = simple_open,
 	.write = lbs_debugfs_write,
 	.read = lbs_debugfs_read,
 	.llseek = default_llseek,
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index d26a78b..1a84507 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -140,18 +140,6 @@
 static int num_of_items = ARRAY_SIZE(items);
 
 /*
- * Generic proc file open handler.
- *
- * This function is called every time a file is accessed for read or write.
- */
-static int
-mwifiex_open_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-/*
  * Proc info file read handler.
  *
  * This function is called when the 'info' file is opened for reading.
@@ -676,19 +664,19 @@
 static const struct file_operations mwifiex_dfs_##name##_fops = {       \
 	.read = mwifiex_##name##_read,                                  \
 	.write = mwifiex_##name##_write,                                \
-	.open = mwifiex_open_generic,                                   \
+	.open = simple_open,                                            \
 };
 
 #define MWIFIEX_DFS_FILE_READ_OPS(name)                                 \
 static const struct file_operations mwifiex_dfs_##name##_fops = {       \
 	.read = mwifiex_##name##_read,                                  \
-	.open = mwifiex_open_generic,                                   \
+	.open = simple_open,                                            \
 };
 
 #define MWIFIEX_DFS_FILE_WRITE_OPS(name)                                \
 static const struct file_operations mwifiex_dfs_##name##_fops = {       \
 	.write = mwifiex_##name##_write,                                \
-	.open = mwifiex_open_generic,                                   \
+	.open = simple_open,                                            \
 };
 
 
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index dd6c64a..88e3ad2 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1336,6 +1336,10 @@
 	unsigned long flags;
 
 	sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
+	if (!sd) {
+		printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
+		return;
+	}
 	sd->buf = buf;
 	sd->len = len;
 	sd->type = type;
@@ -1353,6 +1357,10 @@
 	unsigned long flags;
 
 	sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
+	if (!sd) {
+		printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
+		return;
+	}
 	sd->len = -1; /* Abort */
 
 	spin_lock_irqsave(&priv->scan_lock, flags);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index cd490ab..001735f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -163,7 +163,13 @@
 
 		/* Reschedule urb to read TX status again instantly */
 		return true;
-	} else if (rt2800usb_txstatus_pending(rt2x00dev)) {
+	}
+
+	/* Check if there is any entry that timed out waiting on TX status */
+	if (rt2800usb_txstatus_timeout(rt2x00dev))
+		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
+
+	if (rt2800usb_txstatus_pending(rt2x00dev)) {
 		/* Read register after 250 us */
 		hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
 			      HRTIMER_MODE_REL);
@@ -178,7 +184,7 @@
 	 * here again if status reading is needed.
 	 */
 	if (rt2800usb_txstatus_pending(rt2x00dev) &&
-	    test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
+	    !test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
 		return true;
 	else
 		return false;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index fc9901e..90cc5e7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1062,11 +1062,6 @@
 
 	set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
 
-	/*
-	 * Register the extra components.
-	 */
-	rt2x00rfkill_register(rt2x00dev);
-
 	return 0;
 }
 
@@ -1210,6 +1205,7 @@
 	rt2x00link_register(rt2x00dev);
 	rt2x00leds_register(rt2x00dev);
 	rt2x00debug_register(rt2x00dev);
+	rt2x00rfkill_register(rt2x00dev);
 
 	return 0;
 
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 5100235..e54488d 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -838,7 +838,10 @@
 	__le16 fc = hdr->frame_control;
 
 	txrate = ieee80211_get_tx_rate(hw, info);
-	tcb_desc->hw_rate = txrate->hw_value;
+	if (txrate)
+		tcb_desc->hw_rate = txrate->hw_value;
+	else
+		tcb_desc->hw_rate = 0;
 
 	if (ieee80211_is_data(fc)) {
 		/*
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 07dd38e..288b035 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -912,8 +912,13 @@
 	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
 	pskb = __skb_dequeue(&ring->queue);
-	if (pskb)
+	if (pskb) {
+		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+		pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
+				 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
+				 pskb->len, PCI_DMA_TODEVICE);
 		kfree_skb(pskb);
+	}
 
 	/*NB: the beacon data buffer must be 32-bit aligned. */
 	pskb = ieee80211_beacon_get(hw, mac->vif);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 1eec3a0..4c01624 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1893,7 +1893,7 @@
 		break;
 	case IO_CMD_PAUSE_DM_BY_SCAN:
 		rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
-		dm_digtable.cur_igvalue = 0x17;
+		dm_digtable.cur_igvalue = 0x37;
 		rtl92c_dm_write_dig(hw);
 		break;
 	default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 34591eeb..28fc5fb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3077,7 +3077,7 @@
 		break;
 	case IO_CMD_PAUSE_DM_BY_SCAN:
 		rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue;
-		de_digtable.cur_igvalue = 0x17;
+		de_digtable.cur_igvalue = 0x37;
 		rtl92d_dm_write_dig(hw);
 		break;
 	default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 4898c50..480862c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -91,7 +91,6 @@
 	u8 tid;
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-	static int header_print;
 
 	rtlpriv->dm.dm_initialgain_enable = true;
 	rtlpriv->dm.dm_flag = 0;
@@ -171,10 +170,6 @@
 	for (tid = 0; tid < 8; tid++)
 		skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
 
-	/* Only load firmware for first MAC */
-	if (header_print)
-		return 0;
-
 	/* for firmware buf */
 	rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
 	if (!rtlpriv->rtlhal.pfirmware) {
@@ -186,7 +181,6 @@
 	rtlpriv->max_fw_size = 0x8000;
 	pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
 	pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
-	header_print++;
 
 	/* request fw */
 	err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 2e1e352..d04dbda 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -124,46 +124,38 @@
 	return status;
 }
 
-static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
+static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
 {
+	struct device *dev = rtlpriv->io.dev;
+	struct usb_device *udev = to_usb_device(dev);
 	u8 request;
 	u16 wvalue;
 	u16 index;
-	u32 *data;
-	u32 ret;
+	__le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
 
-	data = kmalloc(sizeof(u32), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
 	request = REALTEK_USB_VENQT_CMD_REQ;
 	index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
 
 	wvalue = (u16)addr;
 	_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
-	ret = le32_to_cpu(*data);
-	kfree(data);
-	return ret;
+	if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+		rtlpriv->usb_data_index = 0;
+	return le32_to_cpu(*data);
 }
 
 static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-	struct device *dev = rtlpriv->io.dev;
-
-	return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
+	return (u8)_usb_read_sync(rtlpriv, addr, 1);
 }
 
 static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-	struct device *dev = rtlpriv->io.dev;
-
-	return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
+	return (u16)_usb_read_sync(rtlpriv, addr, 2);
 }
 
 static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-	struct device *dev = rtlpriv->io.dev;
-
-	return _usb_read_sync(to_usb_device(dev), addr, 4);
+	return _usb_read_sync(rtlpriv, addr, 4);
 }
 
 static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
@@ -955,6 +947,11 @@
 		return -ENOMEM;
 	}
 	rtlpriv = hw->priv;
+	rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
+				    GFP_KERNEL);
+	if (!rtlpriv->usb_data)
+		return -ENOMEM;
+	rtlpriv->usb_data_index = 0;
 	init_completion(&rtlpriv->firmware_loading_complete);
 	SET_IEEE80211_DEV(hw, &intf->dev);
 	udev = interface_to_usbdev(intf);
@@ -1025,6 +1022,7 @@
 	/* rtl_deinit_rfkill(hw); */
 	rtl_usb_deinit(hw);
 	rtl_deinit_core(hw);
+	kfree(rtlpriv->usb_data);
 	rtlpriv->cfg->ops->deinit_sw_leds(hw);
 	rtlpriv->cfg->ops->deinit_sw_vars(hw);
 	_rtl_usb_io_handler_release(hw);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index b591614..28ebc69 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -67,7 +67,7 @@
 #define QOS_QUEUE_NUM				4
 #define RTL_MAC80211_NUM_QUEUE			5
 #define REALTEK_USB_VENQT_MAX_BUF_SIZE		254
-
+#define RTL_USB_MAX_RX_COUNT			100
 #define QBSS_LOAD_SIZE				5
 #define MAX_WMMELE_LENGTH			64
 
@@ -1629,6 +1629,10 @@
 	   interface or hardware */
 	unsigned long status;
 
+	/* data buffer pointer for USB reads */
+	__le32 *usb_data;
+	int usb_data_index;
+
 	/*This must be the last item so
 	   that it points to the data allocated
 	   beyond  this structure like:
diff --git a/drivers/net/wireless/wl1251/debugfs.c b/drivers/net/wireless/wl1251/debugfs.c
index 6c27400..448da1f 100644
--- a/drivers/net/wireless/wl1251/debugfs.c
+++ b/drivers/net/wireless/wl1251/debugfs.c
@@ -47,7 +47,7 @@
 									\
 static const struct file_operations name## _ops = {			\
 	.read = name## _read,						\
-	.open = wl1251_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek	= generic_file_llseek,					\
 };
 
@@ -84,7 +84,7 @@
 									\
 static const struct file_operations sub## _ ##name## _ops = {		\
 	.read = sub## _ ##name## _read,					\
-	.open = wl1251_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek	= generic_file_llseek,					\
 };
 
@@ -117,12 +117,6 @@
 	mutex_unlock(&wl->mutex);
 }
 
-static int wl1251_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u");
 
 DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u");
@@ -235,7 +229,7 @@
 
 static const struct file_operations tx_queue_len_ops = {
 	.read = tx_queue_len_read,
-	.open = wl1251_open_file_generic,
+	.open = simple_open,
 	.llseek = generic_file_llseek,
 };
 
@@ -257,7 +251,7 @@
 
 static const struct file_operations tx_queue_status_ops = {
 	.read = tx_queue_status_read,
-	.open = wl1251_open_file_generic,
+	.open = simple_open,
 	.llseek = generic_file_llseek,
 };
 
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index e1cf727..564d495 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -63,7 +63,7 @@
 									\
 static const struct file_operations name## _ops = {			\
 	.read = name## _read,						\
-	.open = wl1271_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek	= generic_file_llseek,					\
 };
 
@@ -96,7 +96,7 @@
 									\
 static const struct file_operations sub## _ ##name## _ops = {		\
 	.read = sub## _ ##name## _read,					\
-	.open = wl1271_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek	= generic_file_llseek,					\
 };
 
@@ -126,12 +126,6 @@
 	mutex_unlock(&wl->mutex);
 }
 
-static int wl1271_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
 
 DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
@@ -243,7 +237,7 @@
 
 static const struct file_operations tx_queue_len_ops = {
 	.read = tx_queue_len_read,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -289,7 +283,7 @@
 static const struct file_operations gpio_power_ops = {
 	.read = gpio_power_read,
 	.write = gpio_power_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -308,7 +302,7 @@
 
 static const struct file_operations start_recovery_ops = {
 	.write = start_recovery_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -372,7 +366,7 @@
 static const struct file_operations dynamic_ps_timeout_ops = {
 	.read = dynamic_ps_timeout_read,
 	.write = dynamic_ps_timeout_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -441,7 +435,7 @@
 static const struct file_operations forced_ps_ops = {
 	.read = forced_ps_read,
 	.write = forced_ps_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -483,7 +477,7 @@
 static const struct file_operations split_scan_timeout_ops = {
 	.read = split_scan_timeout_read,
 	.write = split_scan_timeout_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -566,7 +560,7 @@
 
 static const struct file_operations driver_state_ops = {
 	.read = driver_state_read,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -675,7 +669,7 @@
 
 static const struct file_operations vifs_state_ops = {
 	.read = vifs_state_read,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -733,7 +727,7 @@
 static const struct file_operations dtim_interval_ops = {
 	.read = dtim_interval_read,
 	.write = dtim_interval_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -791,7 +785,7 @@
 static const struct file_operations suspend_dtim_interval_ops = {
 	.read = suspend_dtim_interval_read,
 	.write = suspend_dtim_interval_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -849,7 +843,7 @@
 static const struct file_operations beacon_interval_ops = {
 	.read = beacon_interval_read,
 	.write = beacon_interval_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -904,7 +898,7 @@
 static const struct file_operations rx_streaming_interval_ops = {
 	.read = rx_streaming_interval_read,
 	.write = rx_streaming_interval_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -959,7 +953,7 @@
 static const struct file_operations rx_streaming_always_ops = {
 	.read = rx_streaming_always_read,
 	.write = rx_streaming_always_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
@@ -1003,7 +997,7 @@
 
 static const struct file_operations beacon_filtering_ops = {
 	.write = beacon_filtering_write,
-	.open = wl1271_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 663b32c..0ebbb19 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1965,7 +1965,7 @@
 	if (xen_initial_domain())
 		return 0;
 
-	if (!xen_platform_pci_unplug)
+	if (xen_hvm_domain() && !xen_platform_pci_unplug)
 		return -ENODEV;
 
 	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index ee8fd03..849357c 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -117,25 +117,17 @@
 }
 
 
-static int default_open(struct inode *inode, struct file *filp)
-{
-	if (inode->i_private)
-		filp->private_data = inode->i_private;
-	return 0;
-}
-
-
 static const struct file_operations ulong_fops = {
 	.read		= ulong_read_file,
 	.write		= ulong_write_file,
-	.open		= default_open,
+	.open		= simple_open,
 	.llseek		= default_llseek,
 };
 
 
 static const struct file_operations ulong_ro_fops = {
 	.read		= ulong_read_file,
-	.open		= default_open,
+	.open		= simple_open,
 	.llseek		= default_llseek,
 };
 
@@ -187,7 +179,7 @@
 
 static const struct file_operations atomic_ro_fops = {
 	.read		= atomic_read_file,
-	.open		= default_open,
+	.open		= simple_open,
 	.llseek		= default_llseek,
 };
 
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 060fd22..0f150f2 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -277,40 +277,6 @@
 	return 0;
 }
 
-/**
- * acpi_dev_run_wake - Enable/disable wake-up for given device.
- * @phys_dev: Device to enable/disable the platform to wake-up the system for.
- * @enable: Whether enable or disable the wake-up functionality.
- *
- * Find the ACPI device object corresponding to @pci_dev and try to
- * enable/disable the GPE associated with it.
- */
-static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
-{
-	struct acpi_device *dev;
-	acpi_handle handle;
-
-	if (!device_run_wake(phys_dev))
-		return -EINVAL;
-
-	handle = DEVICE_ACPI_HANDLE(phys_dev);
-	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
-		dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	if (enable) {
-		acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
-		acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
-	} else {
-		acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
-		acpi_disable_wakeup_device_power(dev);
-	}
-
-	return 0;
-}
-
 static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
 {
 	while (bus->parent) {
@@ -318,14 +284,14 @@
 
 		if (bridge->pme_interrupt)
 			return;
-		if (!acpi_dev_run_wake(&bridge->dev, enable))
+		if (!acpi_pm_device_run_wake(&bridge->dev, enable))
 			return;
 		bus = bus->parent;
 	}
 
 	/* We have reached the root bus. */
 	if (bus->bridge)
-		acpi_dev_run_wake(bus->bridge, enable);
+		acpi_pm_device_run_wake(bus->bridge, enable);
 }
 
 static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
@@ -333,7 +299,7 @@
 	if (dev->pme_interrupt)
 		return 0;
 
-	if (!acpi_dev_run_wake(&dev->dev, enable))
+	if (!acpi_pm_device_run_wake(&dev->dev, enable))
 		return 0;
 
 	acpi_pci_propagate_run_wake(dev->bus, enable);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 4bdef24..b500840 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -508,9 +508,6 @@
 	int pos;
 	u32 reg32;
 
-	if (aspm_disabled)
-		return 0;
-
 	/*
 	 * Some functions in a slot might not all be PCIe functions,
 	 * very strange. Disable ASPM for the whole slot
@@ -519,6 +516,16 @@
 		pos = pci_pcie_cap(child);
 		if (!pos)
 			return -EINVAL;
+
+		/*
+		 * If ASPM is disabled then we're not going to change
+		 * the BIOS state. It's safe to continue even if it's a
+		 * pre-1.1 device
+		 */
+
+		if (aspm_disabled)
+			continue;
+
 		/*
 		 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
 		 * RBER bit to determine if a function is 1.1 version device
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index fd00ff0..d6cc62c 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -290,6 +290,7 @@
 		} else {
 			printk(KERN_DEBUG "enable msix get value %x\n",
 				op.value);
+			err = op.value;
 		}
 	} else {
 		dev_err(&dev->dev, "enable msix get err %x\n", err);
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 88a98cf..f7ba316e 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -609,25 +609,16 @@
 	bool ret = false;
 	u32 temp_limit;
 	u32 avg_power;
-	const char *msg = "MCP limit exceeded: ";
 
 	spin_lock_irqsave(&ips->turbo_status_lock, flags);
 
 	temp_limit = ips->mcp_temp_limit * 100;
-	if (ips->mcp_avg_temp > temp_limit) {
-		dev_info(&ips->dev->dev,
-			"%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
-			temp_limit);
+	if (ips->mcp_avg_temp > temp_limit)
 		ret = true;
-	}
 
 	avg_power = ips->cpu_avg_power + ips->mch_avg_power;
-	if (avg_power > ips->mcp_power_limit) {
-		dev_info(&ips->dev->dev,
-			"%sAvg power %u, limit %u\n", msg, avg_power,
-			ips->mcp_power_limit);
+	if (avg_power > ips->mcp_power_limit)
 		ret = true;
-	}
 
 	spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
 
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b00c176..d21e8f5 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -321,9 +321,14 @@
 {
 	struct acpi_device *acpi = to_acpi_device(dev);
 	struct pnp_dev *pnp = _pnp;
+	struct device *physical_device;
+
+	physical_device = acpi_get_physical_device(acpi->handle);
+	if (physical_device)
+		put_device(physical_device);
 
 	/* true means it matched */
-	return !acpi_get_physical_device(acpi->handle)
+	return !physical_device
 	    && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
 }
 
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 459f664..99dc29f 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -249,7 +249,7 @@
 	  Say Y here to enable support for TWL4030 Battery Charge Interface.
 
 config CHARGER_LP8727
-	tristate "National Semiconductor LP8727 charger driver"
+	tristate "TI/National Semiconductor LP8727 charger driver"
 	depends on I2C
 	help
 	  Say Y here to enable support for LP8727 Charger Driver.
@@ -288,4 +288,23 @@
 	  Say Y to enable support for the battery charger control sysfs and
 	  platform data of MAX8998/LP3974 PMICs.
 
+config CHARGER_SMB347
+	tristate "Summit Microelectronics SMB347 Battery Charger"
+	depends on I2C
+	help
+	  Say Y to include support for Summit Microelectronics SMB347
+	  Battery Charger.
+
+config AB8500_BM
+	bool "AB8500 Battery Management Driver"
+	depends on AB8500_CORE && AB8500_GPADC
+	help
+	  Say Y to include support for AB8500 battery management.
+
+config AB8500_BATTERY_THERM_ON_BATCTRL
+	bool "Thermistor connected on BATCTRL ADC"
+	depends on AB8500_BM
+	help
+	  Say Y to enable battery temperature measurements using
+	  thermistor connected on BATCTRL ADC.
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index c590fa5..b6b2434 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_CHARGER_PCF50633)	+= pcf50633-charger.o
 obj-$(CONFIG_BATTERY_JZ4740)	+= jz4740-battery.o
 obj-$(CONFIG_BATTERY_INTEL_MID)	+= intel_mid_battery.o
+obj-$(CONFIG_AB8500_BM)		+= ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o
 obj-$(CONFIG_CHARGER_ISP1704)	+= isp1704_charger.o
 obj-$(CONFIG_CHARGER_MAX8903)	+= max8903_charger.o
 obj-$(CONFIG_CHARGER_TWL4030)	+= twl4030_charger.o
@@ -42,3 +43,4 @@
 obj-$(CONFIG_CHARGER_MANAGER)	+= charger-manager.o
 obj-$(CONFIG_CHARGER_MAX8997)	+= max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)	+= max8998_charger.o
+obj-$(CONFIG_CHARGER_SMB347)	+= smb347-charger.o
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
new file mode 100644
index 0000000..d8bb993
--- /dev/null
+++ b/drivers/power/ab8500_btemp.c
@@ -0,0 +1,1124 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Battery temperature driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author:
+ *	Johan Palsson <johan.palsson@stericsson.com>
+ *	Karl Komierowski <karl.komierowski@stericsson.com>
+ *	Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/jiffies.h>
+
+#define VTVOUT_V			1800
+
+#define BTEMP_THERMAL_LOW_LIMIT		-10
+#define BTEMP_THERMAL_MED_LIMIT		0
+#define BTEMP_THERMAL_HIGH_LIMIT_52	52
+#define BTEMP_THERMAL_HIGH_LIMIT_57	57
+#define BTEMP_THERMAL_HIGH_LIMIT_62	62
+
+#define BTEMP_BATCTRL_CURR_SRC_7UA	7
+#define BTEMP_BATCTRL_CURR_SRC_20UA	20
+
+#define to_ab8500_btemp_device_info(x) container_of((x), \
+	struct ab8500_btemp, btemp_psy);
+
+/**
+ * struct ab8500_btemp_interrupts - ab8500 interrupts
+ * @name:	name of the interrupt
+ * @isr:		function pointer to the isr
+ */
+struct ab8500_btemp_interrupts {
+	char *name;
+	irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_btemp_events {
+	bool batt_rem;
+	bool btemp_high;
+	bool btemp_medhigh;
+	bool btemp_lowmed;
+	bool btemp_low;
+	bool ac_conn;
+	bool usb_conn;
+};
+
+struct ab8500_btemp_ranges {
+	int btemp_high_limit;
+	int btemp_med_limit;
+	int btemp_low_limit;
+};
+
+/**
+ * struct ab8500_btemp - ab8500 BTEMP device information
+ * @dev:		Pointer to the structure device
+ * @node:		List of AB8500 BTEMPs, hence prepared for reentrance
+ * @curr_source:	What current source we use, in uA
+ * @bat_temp:		Battery temperature in degrees Celsius
+ * @prev_bat_temp:	Last dispatched battery temperature
+ * @parent:		Pointer to the struct ab8500
+ * @gpadc:		Pointer to the struct gpadc
+ * @fg:			Pointer to the struct fg
+ * @pdata:		Pointer to the abx500_btemp platform data
+ * @bat:		Pointer to the abx500_bm platform data
+ * @btemp_psy:		Structure for BTEMP specific battery properties
+ * @events:		Structure for information about events triggered
+ * @btemp_ranges:	Battery temperature range structure
+ * @btemp_wq:		Work queue for measuring the temperature periodically
+ * @btemp_periodic_work:	Work for measuring the temperature periodically
+ */
+struct ab8500_btemp {
+	struct device *dev;
+	struct list_head node;
+	int curr_source;
+	int bat_temp;
+	int prev_bat_temp;
+	struct ab8500 *parent;
+	struct ab8500_gpadc *gpadc;
+	struct ab8500_fg *fg;
+	struct abx500_btemp_platform_data *pdata;
+	struct abx500_bm_data *bat;
+	struct power_supply btemp_psy;
+	struct ab8500_btemp_events events;
+	struct ab8500_btemp_ranges btemp_ranges;
+	struct workqueue_struct *btemp_wq;
+	struct delayed_work btemp_periodic_work;
+};
+
+/* BTEMP power supply properties */
+static enum power_supply_property ab8500_btemp_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_TEMP,
+};
+
+static LIST_HEAD(ab8500_btemp_list);
+
+/**
+ * ab8500_btemp_get() - returns a reference to the primary AB8500 BTEMP
+ * (i.e. the first BTEMP in the instance list)
+ */
+struct ab8500_btemp *ab8500_btemp_get(void)
+{
+	struct ab8500_btemp *btemp;
+	btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
+
+	return btemp;
+}
+
+/**
+ * ab8500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
+ * @di:		pointer to the ab8500_btemp structure
+ * @v_batctrl:	measured batctrl voltage
+ * @inst_curr:	measured instant current
+ *
+ * This function returns the battery resistance that is
+ * derived from the BATCTRL voltage.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
+	int v_batctrl, int inst_curr)
+{
+	int rbs;
+
+	if (is_ab8500_1p1_or_earlier(di->parent)) {
+		/*
+		 * For ABB cut1.0 and 1.1 BAT_CTRL is internally
+		 * connected to 1.8V through a 450k resistor
+		 */
+		return (450000 * (v_batctrl)) / (1800 - v_batctrl);
+	}
+
+	if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+		/*
+		 * If the battery has internal NTC, we use the current
+		 * source to calculate the resistance, 7uA or 20uA
+		 */
+		rbs = (v_batctrl * 1000
+		       - di->bat->gnd_lift_resistance * inst_curr)
+		      / di->curr_source;
+	} else {
+		/*
+		 * BAT_CTRL is internally
+		 * connected to 1.8V through a 80k resistor
+		 */
+		rbs = (80000 * (v_batctrl)) / (1800 - v_batctrl);
+	}
+
+	return rbs;
+}
+
+/**
+ * ab8500_btemp_read_batctrl_voltage() - measure batctrl voltage
+ * @di:		pointer to the ab8500_btemp structure
+ *
+ * This function returns the voltage on BATCTRL. Returns value in mV.
+ */
+static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
+{
+	int vbtemp;
+	static int prev;
+
+	vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
+	if (vbtemp < 0) {
+		dev_err(di->dev,
+			"%s gpadc conversion failed, using previous value",
+			__func__);
+		return prev;
+	}
+	prev = vbtemp;
+	return vbtemp;
+}
+
+/**
+ * ab8500_btemp_curr_source_enable() - enable/disable batctrl current source
+ * @di:		pointer to the ab8500_btemp structure
+ * @enable:	enable or disable the current source
+ *
+ * Enable or disable the current sources for the BatCtrl AD channel
+ */
+static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
+	bool enable)
+{
+	int curr;
+	int ret = 0;
+
+	/*
+	 * BATCTRL current sources are included on AB8500 cut2.0
+	 * and future versions
+	 */
+	if (is_ab8500_1p1_or_earlier(di->parent))
+		return 0;
+
+	/* Only do this for batteries with internal NTC */
+	if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+		if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
+			curr = BAT_CTRL_7U_ENA;
+		else
+			curr = BAT_CTRL_20U_ENA;
+
+		dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
+
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+			FORCE_BAT_CTRL_CMP_HIGH, FORCE_BAT_CTRL_CMP_HIGH);
+		if (ret) {
+			dev_err(di->dev, "%s failed setting cmp_force\n",
+				__func__);
+			return ret;
+		}
+
+		/*
+		 * We have to wait one 32kHz cycle before enabling
+		 * the current source, since ForceBatCtrlCmpHigh needs
+		 * to be written in a separate cycle
+		 */
+		udelay(32);
+
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+			FORCE_BAT_CTRL_CMP_HIGH | curr);
+		if (ret) {
+			dev_err(di->dev, "%s failed enabling current source\n",
+				__func__);
+			goto disable_curr_source;
+		}
+	} else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+		dev_dbg(di->dev, "Disable BATCTRL curr source\n");
+
+		/* Write 0 to the curr bits */
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+			BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+			~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+		if (ret) {
+			dev_err(di->dev, "%s failed disabling current source\n",
+				__func__);
+			goto disable_curr_source;
+		}
+
+		/* Enable Pull-Up and comparator */
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER,	AB8500_BAT_CTRL_CURRENT_SOURCE,
+			BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+			BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+		if (ret) {
+			dev_err(di->dev, "%s failed enabling PU and comp\n",
+				__func__);
+			goto enable_pu_comp;
+		}
+
+		/*
+		 * We have to wait one 32kHz cycle before disabling
+		 * ForceBatCtrlCmpHigh since this needs to be written
+		 * in a separate cycle
+		 */
+		udelay(32);
+
+		/* Disable 'force comparator' */
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+			FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+		if (ret) {
+			dev_err(di->dev, "%s failed disabling force comp\n",
+				__func__);
+			goto disable_force_comp;
+		}
+	}
+	return ret;
+
+	/*
+	 * We have to try unsetting FORCE_BAT_CTRL_CMP_HIGH one more time
+	 * if we got an error above
+	 */
+disable_curr_source:
+	/* Write 0 to the curr bits */
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+			BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+			~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+	if (ret) {
+		dev_err(di->dev, "%s failed disabling current source\n",
+			__func__);
+		return ret;
+	}
+enable_pu_comp:
+	/* Enable Pull-Up and comparator */
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+		AB8500_CHARGER,	AB8500_BAT_CTRL_CURRENT_SOURCE,
+		BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+		BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+	if (ret) {
+		dev_err(di->dev, "%s failed enabling PU and comp\n",
+			__func__);
+		return ret;
+	}
+
+disable_force_comp:
+	/*
+	 * We have to wait one 32kHz cycle before disabling
+	 * ForceBatCtrlCmpHigh since this needs to be written
+	 * in a separate cycle
+	 */
+	udelay(32);
+
+	/* Disable 'force comparator' */
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+		AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+		FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+	if (ret) {
+		dev_err(di->dev, "%s failed disabling force comp\n",
+			__func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * ab8500_btemp_get_batctrl_res() - get battery resistance
+ * @di:		pointer to the ab8500_btemp structure
+ *
+ * This function returns the battery pack identification resistance.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
+{
+	int ret;
+	int batctrl = 0;
+	int res;
+	int inst_curr;
+	int i;
+
+	/*
+	 * BATCTRL current sources are included on AB8500 cut2.0
+	 * and future versions
+	 */
+	ret = ab8500_btemp_curr_source_enable(di, true);
+	if (ret) {
+		dev_err(di->dev, "%s curr source enabled failed\n", __func__);
+		return ret;
+	}
+
+	if (!di->fg)
+		di->fg = ab8500_fg_get();
+	if (!di->fg) {
+		dev_err(di->dev, "No fg found\n");
+		return -EINVAL;
+	}
+
+	ret = ab8500_fg_inst_curr_start(di->fg);
+
+	if (ret) {
+		dev_err(di->dev, "Failed to start current measurement\n");
+		return ret;
+	}
+
+	/*
+	 * Since there is no interrupt when current measurement is done,
+	 * loop for over 250ms (250ms is one sample conversion time
+	 * with 32.768 kHz RTC clock). Note that a stop time must be set
+	 * since the ab8500_btemp_read_batctrl_voltage call can block and
+	 * take an unknown amount of time to complete.
+	 */
+	i = 0;
+
+	do {
+		batctrl += ab8500_btemp_read_batctrl_voltage(di);
+		i++;
+		msleep(20);
+	} while (!ab8500_fg_inst_curr_done(di->fg));
+	batctrl /= i;
+
+	ret = ab8500_fg_inst_curr_finalize(di->fg, &inst_curr);
+	if (ret) {
+		dev_err(di->dev, "Failed to finalize current measurement\n");
+		return ret;
+	}
+
+	res = ab8500_btemp_batctrl_volt_to_res(di, batctrl, inst_curr);
+
+	ret = ab8500_btemp_curr_source_enable(di, false);
+	if (ret) {
+		dev_err(di->dev, "%s curr source disable failed\n", __func__);
+		return ret;
+	}
+
+	dev_dbg(di->dev, "%s batctrl: %d res: %d inst_curr: %d samples: %d\n",
+		__func__, batctrl, res, inst_curr, i);
+
+	return res;
+}
+
+/**
+ * ab8500_btemp_res_to_temp() - resistance to temperature
+ * @di:		pointer to the ab8500_btemp structure
+ * @tbl:	pointer to the resistance to temperature table
+ * @tbl_size:	size of the resistance to temperature table
+ * @res:	resistance to calculate the temperature from
+ *
+ * This function returns the battery temperature in degrees Celsius
+ * based on the NTC resistance.
+ */
+static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
+	const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+{
+	int i, temp;
+	/*
+	 * Calculate the formula for the straight line
+	 * Simple interpolation if we are within
+	 * the resistance table limits, extrapolate
+	 * if resistance is outside the limits.
+	 */
+	if (res > tbl[0].resist)
+		i = 0;
+	else if (res <= tbl[tbl_size - 1].resist)
+		i = tbl_size - 2;
+	else {
+		i = 0;
+		while (!(res <= tbl[i].resist &&
+			res > tbl[i + 1].resist))
+			i++;
+	}
+
+	temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+		(res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+	return temp;
+}
+
+/**
+ * ab8500_btemp_measure_temp() - measure battery temperature
+ * @di:		pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature (on success) else the previous temperature
+ */
+static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
+{
+	int temp;
+	static int prev;
+	int rbat, rntc, vntc;
+	u8 id;
+
+	id = di->bat->batt_id;
+
+	if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+			id != BATTERY_UNKNOWN) {
+
+		rbat = ab8500_btemp_get_batctrl_res(di);
+		if (rbat < 0) {
+			dev_err(di->dev, "%s get batctrl res failed\n",
+				__func__);
+			/*
+			 * Return out-of-range temperature so that
+			 * charging is stopped
+			 */
+			return BTEMP_THERMAL_LOW_LIMIT;
+		}
+
+		temp = ab8500_btemp_res_to_temp(di,
+			di->bat->bat_type[id].r_to_t_tbl,
+			di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+	} else {
+		vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
+		if (vntc < 0) {
+			dev_err(di->dev,
+				"%s gpadc conversion failed,"
+				" using previous value\n", __func__);
+			return prev;
+		}
+		/*
+		 * The PCB NTC is sourced from VTVOUT via a 230kOhm
+		 * resistor.
+		 */
+		rntc = 230000 * vntc / (VTVOUT_V - vntc);
+
+		temp = ab8500_btemp_res_to_temp(di,
+			di->bat->bat_type[id].r_to_t_tbl,
+			di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+		prev = temp;
+	}
+	dev_dbg(di->dev, "Battery temperature is %d\n", temp);
+	return temp;
+}
+
+/**
+ * ab8500_btemp_id() - Identify the connected battery
+ * @di:		pointer to the ab8500_btemp structure
+ *
+ * This function will try to identify the battery by reading the ID
+ * resistor. Some brands use a combined ID resistor with an NTC resistor to
+ * both be able to identify and to read the temperature of it.
+ */
+static int ab8500_btemp_id(struct ab8500_btemp *di)
+{
+	int res;
+	u8 i;
+
+	di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+	di->bat->batt_id = BATTERY_UNKNOWN;
+
+	res =  ab8500_btemp_get_batctrl_res(di);
+	if (res < 0) {
+		dev_err(di->dev, "%s get batctrl res failed\n", __func__);
+		return -ENXIO;
+	}
+
+	/* BATTERY_UNKNOWN is defined on position 0, skip it! */
+	for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
+		if ((res <= di->bat->bat_type[i].resis_high) &&
+			(res >= di->bat->bat_type[i].resis_low)) {
+			dev_dbg(di->dev, "Battery detected on %s"
+				" low %d < res %d < high: %d"
+				" index: %d\n",
+				di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+				"BATCTRL" : "BATTEMP",
+				di->bat->bat_type[i].resis_low, res,
+				di->bat->bat_type[i].resis_high, i);
+
+			di->bat->batt_id = i;
+			break;
+		}
+	}
+
+	if (di->bat->batt_id == BATTERY_UNKNOWN) {
+		dev_warn(di->dev, "Battery identified as unknown"
+			", resistance %d Ohm\n", res);
+		return -ENXIO;
+	}
+
+	/*
+	 * We only have to change current source if the
+	 * detected type is Type 1, else we use the 7uA source
+	 */
+	if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+			di->bat->batt_id == 1) {
+		dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
+		di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+	}
+
+	return di->bat->batt_id;
+}
+
+/**
+ * ab8500_btemp_periodic_work() - Measuring the temperature periodically
+ * @work:	pointer to the work_struct structure
+ *
+ * Work function for measuring the temperature periodically
+ */
+static void ab8500_btemp_periodic_work(struct work_struct *work)
+{
+	int interval;
+	struct ab8500_btemp *di = container_of(work,
+		struct ab8500_btemp, btemp_periodic_work.work);
+
+	di->bat_temp = ab8500_btemp_measure_temp(di);
+
+	if (di->bat_temp != di->prev_bat_temp) {
+		di->prev_bat_temp = di->bat_temp;
+		power_supply_changed(&di->btemp_psy);
+	}
+
+	if (di->events.ac_conn || di->events.usb_conn)
+		interval = di->bat->temp_interval_chg;
+	else
+		interval = di->bat->temp_interval_nochg;
+
+	/* Schedule a new measurement */
+	queue_delayed_work(di->btemp_wq,
+		&di->btemp_periodic_work,
+		round_jiffies(interval * HZ));
+}
+
+/**
+ * ab8500_btemp_batctrlindb_handler() - battery removal detected
+ * @irq:       interrupt number
+ * @_di:       void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_batctrlindb_handler(int irq, void *_di)
+{
+	struct ab8500_btemp *di = _di;
+	dev_err(di->dev, "Battery removal detected!\n");
+
+	di->events.batt_rem = true;
+	power_supply_changed(&di->btemp_psy);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_templow_handler() - battery temp lower than 10 degrees
+ * @irq:       interrupt number
+ * @_di:       void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
+{
+	struct ab8500_btemp *di = _di;
+
+	if (is_ab8500_2p0_or_earlier(di->parent)) {
+		dev_dbg(di->dev, "Ignore false btemp low irq"
+			" for ABB cut 1.0, 1.1 and 2.0\n");
+	} else {
+		dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
+
+		di->events.btemp_low = true;
+		di->events.btemp_high = false;
+		di->events.btemp_medhigh = false;
+		di->events.btemp_lowmed = false;
+		power_supply_changed(&di->btemp_psy);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_temphigh_handler() - battery temp higher than max temp
+ * @irq:       interrupt number
+ * @_di:       void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_temphigh_handler(int irq, void *_di)
+{
+	struct ab8500_btemp *di = _di;
+
+	dev_crit(di->dev, "Battery temperature is higher than MAX temp\n");
+
+	di->events.btemp_high = true;
+	di->events.btemp_medhigh = false;
+	di->events.btemp_lowmed = false;
+	di->events.btemp_low = false;
+	power_supply_changed(&di->btemp_psy);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_lowmed_handler() - battery temp between low and medium
+ * @irq:       interrupt number
+ * @_di:       void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_lowmed_handler(int irq, void *_di)
+{
+	struct ab8500_btemp *di = _di;
+
+	dev_dbg(di->dev, "Battery temperature is between low and medium\n");
+
+	di->events.btemp_lowmed = true;
+	di->events.btemp_medhigh = false;
+	di->events.btemp_high = false;
+	di->events.btemp_low = false;
+	power_supply_changed(&di->btemp_psy);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_medhigh_handler() - battery temp between medium and high
+ * @irq:       interrupt number
+ * @_di:       void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_medhigh_handler(int irq, void *_di)
+{
+	struct ab8500_btemp *di = _di;
+
+	dev_dbg(di->dev, "Battery temperature is between medium and high\n");
+
+	di->events.btemp_medhigh = true;
+	di->events.btemp_lowmed = false;
+	di->events.btemp_high = false;
+	di->events.btemp_low = false;
+	power_supply_changed(&di->btemp_psy);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_periodic() - Periodic temperature measurements
+ * @di:		pointer to the ab8500_btemp structure
+ * @enable:	enable or disable periodic temperature measurements
+ *
+ * Starts or stops periodic temperature measurements. Periodic measurements
+ * should only be done when a charger is connected.
+ */
+static void ab8500_btemp_periodic(struct ab8500_btemp *di,
+	bool enable)
+{
+	dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
+		enable);
+	/*
+	 * Make sure a new measurement is done directly by cancelling
+	 * any pending work
+	 */
+	cancel_delayed_work_sync(&di->btemp_periodic_work);
+
+	if (enable)
+		queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+}
+
+/**
+ * ab8500_btemp_get_temp() - get battery temperature
+ * @di:		pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature
+ */
+static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
+{
+	int temp = 0;
+
+	/*
+	 * The BTEMP events are not reliable on AB8500 cut2.0
+	 * and prior versions
+	 */
+	if (is_ab8500_2p0_or_earlier(di->parent)) {
+		temp = di->bat_temp * 10;
+	} else {
+		if (di->events.btemp_low) {
+			if (temp > di->btemp_ranges.btemp_low_limit)
+				temp = di->btemp_ranges.btemp_low_limit;
+			else
+				temp = di->bat_temp * 10;
+		} else if (di->events.btemp_high) {
+			if (temp < di->btemp_ranges.btemp_high_limit)
+				temp = di->btemp_ranges.btemp_high_limit;
+			else
+				temp = di->bat_temp * 10;
+		} else if (di->events.btemp_lowmed) {
+			if (temp > di->btemp_ranges.btemp_med_limit)
+				temp = di->btemp_ranges.btemp_med_limit;
+			else
+				temp = di->bat_temp * 10;
+		} else if (di->events.btemp_medhigh) {
+			if (temp < di->btemp_ranges.btemp_med_limit)
+				temp = di->btemp_ranges.btemp_med_limit;
+			else
+				temp = di->bat_temp * 10;
+		} else
+			temp = di->bat_temp * 10;
+	}
+	return temp;
+}
+
+/**
+ * ab8500_btemp_get_batctrl_temp() - get the temperature
+ * @btemp:      pointer to the btemp structure
+ *
+ * Returns the batctrl temperature in millidegrees
+ */
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+	return btemp->bat_temp * 1000;
+}
+
+/**
+ * ab8500_btemp_get_property() - get the btemp properties
+ * @psy:        pointer to the power_supply structure
+ * @psp:        pointer to the power_supply_property structure
+ * @val:        pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the btemp
+ * properties by reading the sysfs files.
+ * online:	presence of the battery
+ * present:	presence of the battery
+ * technology:	battery technology
+ * temp:	battery temperature
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_btemp_get_property(struct power_supply *psy,
+	enum power_supply_property psp,
+	union power_supply_propval *val)
+{
+	struct ab8500_btemp *di;
+
+	di = to_ab8500_btemp_device_info(psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+	case POWER_SUPPLY_PROP_ONLINE:
+		if (di->events.batt_rem)
+			val->intval = 0;
+		else
+			val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = di->bat->bat_type[di->bat->batt_id].name;
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = ab8500_btemp_get_temp(di);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
+{
+	struct power_supply *psy;
+	struct power_supply *ext;
+	struct ab8500_btemp *di;
+	union power_supply_propval ret;
+	int i, j;
+	bool psy_found = false;
+
+	psy = (struct power_supply *)data;
+	ext = dev_get_drvdata(dev);
+	di = to_ab8500_btemp_device_info(psy);
+
+	/*
+	 * For all psy where the name of your driver
+	 * appears in any supplied_to
+	 */
+	for (i = 0; i < ext->num_supplicants; i++) {
+		if (!strcmp(ext->supplied_to[i], psy->name))
+			psy_found = true;
+	}
+
+	if (!psy_found)
+		return 0;
+
+	/* Go through all properties for the psy */
+	for (j = 0; j < ext->num_properties; j++) {
+		enum power_supply_property prop;
+		prop = ext->properties[j];
+
+		if (ext->get_property(ext, prop, &ret))
+			continue;
+
+		switch (prop) {
+		case POWER_SUPPLY_PROP_PRESENT:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_MAINS:
+				/* AC disconnected */
+				if (!ret.intval && di->events.ac_conn) {
+					di->events.ac_conn = false;
+				}
+				/* AC connected */
+				else if (ret.intval && !di->events.ac_conn) {
+					di->events.ac_conn = true;
+					if (!di->events.usb_conn)
+						ab8500_btemp_periodic(di, true);
+				}
+				break;
+			case POWER_SUPPLY_TYPE_USB:
+				/* USB disconnected */
+				if (!ret.intval && di->events.usb_conn) {
+					di->events.usb_conn = false;
+				}
+				/* USB connected */
+				else if (ret.intval && !di->events.usb_conn) {
+					di->events.usb_conn = true;
+					if (!di->events.ac_conn)
+						ab8500_btemp_periodic(di, true);
+				}
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * ab8500_btemp_external_power_changed() - callback for power supply changes
+ * @psy:       pointer to the structure power_supply
+ *
+ * This function is assigned to the external_power_changed function pointer
+ * of the power_supply structure.
+ * This function gets executed when there is a change in the external power
+ * supply to the btemp.
+ */
+static void ab8500_btemp_external_power_changed(struct power_supply *psy)
+{
+	struct ab8500_btemp *di = to_ab8500_btemp_device_info(psy);
+
+	class_for_each_device(power_supply_class, NULL,
+		&di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+}
+
+/* ab8500 btemp driver interrupts and their respective isr */
+static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
+	{"BAT_CTRL_INDB", ab8500_btemp_batctrlindb_handler},
+	{"BTEMP_LOW", ab8500_btemp_templow_handler},
+	{"BTEMP_HIGH", ab8500_btemp_temphigh_handler},
+	{"BTEMP_LOW_MEDIUM", ab8500_btemp_lowmed_handler},
+	{"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
+};
+
+#if defined(CONFIG_PM)
+static int ab8500_btemp_resume(struct platform_device *pdev)
+{
+	struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+	ab8500_btemp_periodic(di, true);
+
+	return 0;
+}
+
+static int ab8500_btemp_suspend(struct platform_device *pdev,
+	pm_message_t state)
+{
+	struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+	ab8500_btemp_periodic(di, false);
+
+	return 0;
+}
+#else
+#define ab8500_btemp_suspend      NULL
+#define ab8500_btemp_resume       NULL
+#endif
+
+static int __devexit ab8500_btemp_remove(struct platform_device *pdev)
+{
+	struct ab8500_btemp *di = platform_get_drvdata(pdev);
+	int i, irq;
+
+	/* Disable interrupts */
+	for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+		irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+		free_irq(irq, di);
+	}
+
+	/* Delete the work queue */
+	destroy_workqueue(di->btemp_wq);
+
+	flush_scheduled_work();
+	power_supply_unregister(&di->btemp_psy);
+	platform_set_drvdata(pdev, NULL);
+	kfree(di);
+
+	return 0;
+}
+
+static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
+{
+	int irq, i, ret = 0;
+	u8 val;
+	struct abx500_bm_plat_data *plat_data;
+
+	struct ab8500_btemp *di =
+		kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+	if (!di)
+		return -ENOMEM;
+
+	/* get parent data */
+	di->dev = &pdev->dev;
+	di->parent = dev_get_drvdata(pdev->dev.parent);
+	di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+	/* get btemp specific platform data */
+	plat_data = pdev->dev.platform_data;
+	di->pdata = plat_data->btemp;
+	if (!di->pdata) {
+		dev_err(di->dev, "no btemp platform data supplied\n");
+		ret = -EINVAL;
+		goto free_device_info;
+	}
+
+	/* get battery specific platform data */
+	di->bat = plat_data->battery;
+	if (!di->bat) {
+		dev_err(di->dev, "no battery platform data supplied\n");
+		ret = -EINVAL;
+		goto free_device_info;
+	}
+
+	/* BTEMP supply */
+	di->btemp_psy.name = "ab8500_btemp";
+	di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+	di->btemp_psy.properties = ab8500_btemp_props;
+	di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
+	di->btemp_psy.get_property = ab8500_btemp_get_property;
+	di->btemp_psy.supplied_to = di->pdata->supplied_to;
+	di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+	di->btemp_psy.external_power_changed =
+		ab8500_btemp_external_power_changed;
+
+
+	/* Create a work queue for the btemp */
+	di->btemp_wq =
+		create_singlethread_workqueue("ab8500_btemp_wq");
+	if (di->btemp_wq == NULL) {
+		dev_err(di->dev, "failed to create work queue\n");
+		goto free_device_info;
+	}
+
+	/* Init work for measuring temperature periodically */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+		ab8500_btemp_periodic_work);
+
+	/* Identify the battery */
+	if (ab8500_btemp_id(di) < 0)
+		dev_warn(di->dev, "failed to identify the battery\n");
+
+	/* Set BTEMP thermal limits. Low and Med are fixed */
+	di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
+	di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_BTEMP_HIGH_TH, &val);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		goto free_btemp_wq;
+	}
+	switch (val) {
+	case BTEMP_HIGH_TH_57_0:
+	case BTEMP_HIGH_TH_57_1:
+		di->btemp_ranges.btemp_high_limit =
+			BTEMP_THERMAL_HIGH_LIMIT_57;
+		break;
+	case BTEMP_HIGH_TH_52:
+		di->btemp_ranges.btemp_high_limit =
+			BTEMP_THERMAL_HIGH_LIMIT_52;
+		break;
+	case BTEMP_HIGH_TH_62:
+		di->btemp_ranges.btemp_high_limit =
+			BTEMP_THERMAL_HIGH_LIMIT_62;
+		break;
+	}
+
+	/* Register BTEMP power supply class */
+	ret = power_supply_register(di->dev, &di->btemp_psy);
+	if (ret) {
+		dev_err(di->dev, "failed to register BTEMP psy\n");
+		goto free_btemp_wq;
+	}
+
+	/* Register interrupts */
+	for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+		irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+		ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
+			IRQF_SHARED | IRQF_NO_SUSPEND,
+			ab8500_btemp_irq[i].name, di);
+
+		if (ret) {
+			dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+				, ab8500_btemp_irq[i].name, irq, ret);
+			goto free_irq;
+		}
+		dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+			ab8500_btemp_irq[i].name, irq, ret);
+	}
+
+	platform_set_drvdata(pdev, di);
+
+	/* Kick off periodic temperature measurements */
+	ab8500_btemp_periodic(di, true);
+	list_add_tail(&di->node, &ab8500_btemp_list);
+
+	return ret;
+
+free_irq:
+	power_supply_unregister(&di->btemp_psy);
+
+	/* We also have to free all successfully registered irqs */
+	for (i = i - 1; i >= 0; i--) {
+		irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+		free_irq(irq, di);
+	}
+free_btemp_wq:
+	destroy_workqueue(di->btemp_wq);
+free_device_info:
+	kfree(di);
+
+	return ret;
+}
+
+static struct platform_driver ab8500_btemp_driver = {
+	.probe = ab8500_btemp_probe,
+	.remove = __devexit_p(ab8500_btemp_remove),
+	.suspend = ab8500_btemp_suspend,
+	.resume = ab8500_btemp_resume,
+	.driver = {
+		.name = "ab8500-btemp",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init ab8500_btemp_init(void)
+{
+	return platform_driver_register(&ab8500_btemp_driver);
+}
+
+static void __exit ab8500_btemp_exit(void)
+{
+	platform_driver_unregister(&ab8500_btemp_driver);
+}
+
+subsys_initcall_sync(ab8500_btemp_init);
+module_exit(ab8500_btemp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-btemp");
+MODULE_DESCRIPTION("AB8500 battery temperature driver");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
new file mode 100644
index 0000000..e2b4acc
--- /dev/null
+++ b/drivers/power/ab8500_charger.c
@@ -0,0 +1,2789 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Charger driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author:
+ *	Johan Palsson <johan.palsson@stericsson.com>
+ *	Karl Komierowski <karl.komierowski@stericsson.com>
+ *	Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/usb/otg.h>
+
+/* Charger constants */
+#define NO_PW_CONN			0
+#define AC_PW_CONN			1
+#define USB_PW_CONN			2
+
+#define MAIN_WDOG_ENA			0x01
+#define MAIN_WDOG_KICK			0x02
+#define MAIN_WDOG_DIS			0x00
+#define CHARG_WD_KICK			0x01
+#define MAIN_CH_ENA			0x01
+#define MAIN_CH_NO_OVERSHOOT_ENA_N	0x02
+#define USB_CH_ENA			0x01
+#define USB_CHG_NO_OVERSHOOT_ENA_N	0x02
+#define MAIN_CH_DET			0x01
+#define MAIN_CH_CV_ON			0x04
+#define USB_CH_CV_ON			0x08
+#define VBUS_DET_DBNC100		0x02
+#define VBUS_DET_DBNC1			0x01
+#define OTP_ENABLE_WD			0x01
+
+#define MAIN_CH_INPUT_CURR_SHIFT	4
+#define VBUS_IN_CURR_LIM_SHIFT		4
+
+#define LED_INDICATOR_PWM_ENA		0x01
+#define LED_INDICATOR_PWM_DIS		0x00
+#define LED_IND_CUR_5MA			0x04
+#define LED_INDICATOR_PWM_DUTY_252_256	0xBF
+
+/* HW failure constants */
+#define MAIN_CH_TH_PROT			0x02
+#define VBUS_CH_NOK			0x08
+#define USB_CH_TH_PROT			0x02
+#define VBUS_OVV_TH			0x01
+#define MAIN_CH_NOK			0x01
+#define VBUS_DET			0x80
+
+/* UsbLineStatus register bit masks */
+#define AB8500_USB_LINK_STATUS		0x78
+#define AB8500_STD_HOST_SUSP		0x18
+
+/* Watchdog timeout constant */
+#define WD_TIMER			0x30 /* 4min */
+#define WD_KICK_INTERVAL		(60 * HZ)
+
+/* Lowest charger voltage is 3.39V -> 0x4E */
+#define LOW_VOLT_REG			0x4E
+
+/* UsbLineStatus register - usb types */
+enum ab8500_charger_link_status {
+	USB_STAT_NOT_CONFIGURED,
+	USB_STAT_STD_HOST_NC,
+	USB_STAT_STD_HOST_C_NS,
+	USB_STAT_STD_HOST_C_S,
+	USB_STAT_HOST_CHG_NM,
+	USB_STAT_HOST_CHG_HS,
+	USB_STAT_HOST_CHG_HS_CHIRP,
+	USB_STAT_DEDICATED_CHG,
+	USB_STAT_ACA_RID_A,
+	USB_STAT_ACA_RID_B,
+	USB_STAT_ACA_RID_C_NM,
+	USB_STAT_ACA_RID_C_HS,
+	USB_STAT_ACA_RID_C_HS_CHIRP,
+	USB_STAT_HM_IDGND,
+	USB_STAT_RESERVED,
+	USB_STAT_NOT_VALID_LINK,
+};
+
+enum ab8500_usb_state {
+	AB8500_BM_USB_STATE_RESET_HS,	/* HighSpeed Reset */
+	AB8500_BM_USB_STATE_RESET_FS,	/* FullSpeed/LowSpeed Reset */
+	AB8500_BM_USB_STATE_CONFIGURED,
+	AB8500_BM_USB_STATE_SUSPEND,
+	AB8500_BM_USB_STATE_RESUME,
+	AB8500_BM_USB_STATE_MAX,
+};
+
+/* VBUS input current limits supported in AB8500 in mA */
+#define USB_CH_IP_CUR_LVL_0P05		50
+#define USB_CH_IP_CUR_LVL_0P09		98
+#define USB_CH_IP_CUR_LVL_0P19		193
+#define USB_CH_IP_CUR_LVL_0P29		290
+#define USB_CH_IP_CUR_LVL_0P38		380
+#define USB_CH_IP_CUR_LVL_0P45		450
+#define USB_CH_IP_CUR_LVL_0P5		500
+#define USB_CH_IP_CUR_LVL_0P6		600
+#define USB_CH_IP_CUR_LVL_0P7		700
+#define USB_CH_IP_CUR_LVL_0P8		800
+#define USB_CH_IP_CUR_LVL_0P9		900
+#define USB_CH_IP_CUR_LVL_1P0		1000
+#define USB_CH_IP_CUR_LVL_1P1		1100
+#define USB_CH_IP_CUR_LVL_1P3		1300
+#define USB_CH_IP_CUR_LVL_1P4		1400
+#define USB_CH_IP_CUR_LVL_1P5		1500
+
+#define VBAT_TRESH_IP_CUR_RED		3800
+
+#define to_ab8500_charger_usb_device_info(x) container_of((x), \
+	struct ab8500_charger, usb_chg)
+#define to_ab8500_charger_ac_device_info(x) container_of((x), \
+	struct ab8500_charger, ac_chg)
+
+/**
+ * struct ab8500_charger_interrupts - ab8500 charger interrupts
+ * @name:	name of the interrupt
+ * @isr:	function pointer to the isr
+ */
+struct ab8500_charger_interrupts {
+	char *name;
+	irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_charger_info {
+	int charger_connected;
+	int charger_online;
+	int charger_voltage;
+	int cv_active;
+	bool wd_expired;
+};
+
+struct ab8500_charger_event_flags {
+	bool mainextchnotok;
+	bool main_thermal_prot;
+	bool usb_thermal_prot;
+	bool vbus_ovv;
+	bool usbchargernotok;
+	bool chgwdexp;
+	bool vbus_collapse;
+};
+
+struct ab8500_charger_usb_state {
+	bool usb_changed;
+	int usb_current;
+	enum ab8500_usb_state state;
+	spinlock_t usb_lock;
+};
+
+/**
+ * struct ab8500_charger - ab8500 Charger device information
+ * @dev:		Pointer to the structure device
+ * @max_usb_in_curr:	Max USB charger input current
+ * @vbus_detected:	VBUS detected
+ * @vbus_detected_start:
+ *			VBUS detected during startup
+ * @ac_conn:		True when the AC charger has been plugged in
+ * @vddadc_en_ac:	Indicate if VDD ADC supply is enabled because AC
+ *			charger is enabled
+ * @vddadc_en_usb:	Indicate if VDD ADC supply is enabled because USB
+ *			charger is enabled
+ * @vbat:		Battery voltage
+ * @old_vbat:		Previously measured battery voltage
+ * @autopower:		Indicate if we should have automatic pwron after pwrloss
+ * @parent:		Pointer to the struct ab8500
+ * @gpadc:		Pointer to the struct gpadc
+ * @pdata:		Pointer to the abx500_charger platform data
+ * @bat:		Pointer to the abx500_bm platform data
+ * @flags:		Structure for information about events triggered
+ * @usb_state:		Structure for usb stack information
+ * @ac_chg:		AC charger power supply
+ * @usb_chg:		USB charger power supply
+ * @ac:			Structure that holds the AC charger properties
+ * @usb:		Structure that holds the USB charger properties
+ * @regu:		Pointer to the struct regulator
+ * @charger_wq:		Work queue for the IRQs and checking HW state
+ * @check_vbat_work:	Work for checking vbat threshold to adjust vbus current
+ * @check_hw_failure_work:	Work for checking HW state
+ * @check_usbchgnotok_work:	Work for checking USB charger not ok status
+ * @kick_wd_work:		Work for kicking the charger watchdog in case
+ *				of ABB rev 1.* due to the watchdog logic bug
+ * @ac_work:			Work for checking AC charger connection
+ * @detect_usb_type_work:	Work for detecting the USB type connected
+ * @usb_link_status_work:	Work for checking the new USB link status
+ * @usb_state_changed_work:	Work for checking USB state
+ * @check_main_thermal_prot_work:
+ *				Work for checking Main thermal status
+ * @check_usb_thermal_prot_work:
+ *				Work for checking USB thermal status
+ */
+struct ab8500_charger {
+	struct device *dev;
+	int max_usb_in_curr;
+	bool vbus_detected;
+	bool vbus_detected_start;
+	bool ac_conn;
+	bool vddadc_en_ac;
+	bool vddadc_en_usb;
+	int vbat;
+	int old_vbat;
+	bool autopower;
+	struct ab8500 *parent;
+	struct ab8500_gpadc *gpadc;
+	struct abx500_charger_platform_data *pdata;
+	struct abx500_bm_data *bat;
+	struct ab8500_charger_event_flags flags;
+	struct ab8500_charger_usb_state usb_state;
+	struct ux500_charger ac_chg;
+	struct ux500_charger usb_chg;
+	struct ab8500_charger_info ac;
+	struct ab8500_charger_info usb;
+	struct regulator *regu;
+	struct workqueue_struct *charger_wq;
+	struct delayed_work check_vbat_work;
+	struct delayed_work check_hw_failure_work;
+	struct delayed_work check_usbchgnotok_work;
+	struct delayed_work kick_wd_work;
+	struct work_struct ac_work;
+	struct work_struct detect_usb_type_work;
+	struct work_struct usb_link_status_work;
+	struct work_struct usb_state_changed_work;
+	struct work_struct check_main_thermal_prot_work;
+	struct work_struct check_usb_thermal_prot_work;
+	struct usb_phy *usb_phy;
+	struct notifier_block nb;
+};
+
+/* AC properties */
+static enum power_supply_property ab8500_charger_ac_props[] = {
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/* USB properties */
+static enum power_supply_property ab8500_charger_usb_props[] = {
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/**
+ * ab8500_power_loss_handling - set how we handle powerloss.
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Magic numbers are from the STE HW department.
+ */
+static void ab8500_power_loss_handling(struct ab8500_charger *di)
+{
+	u8 reg;
+	int ret;
+
+	dev_dbg(di->dev, "Autopower : %d\n", di->autopower);
+
+	/* read the autopower register */
+	ret = abx500_get_register_interruptible(di->dev, 0x15, 0x00, &reg);
+	if (ret) {
+		dev_err(di->dev, "%d read failed\n", __LINE__);
+		return;
+	}
+
+	/* enable the OPT emulation registers */
+	ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
+	if (ret) {
+		dev_err(di->dev, "%d write failed\n", __LINE__);
+		return;
+	}
+
+	if (di->autopower)
+		reg |= 0x8;
+	else
+		reg &= ~0x8;
+
+	/* write back the changed value to autopower reg */
+	ret = abx500_set_register_interruptible(di->dev, 0x15, 0x00, reg);
+	if (ret) {
+		dev_err(di->dev, "%d write failed\n", __LINE__);
+		return;
+	}
+
+	/* disable the set OTP registers again */
+	ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
+	if (ret) {
+		dev_err(di->dev, "%d write failed\n", __LINE__);
+		return;
+	}
+}
+
+/**
+ * ab8500_power_supply_changed - a wrapper with local extensions for
+ * power_supply_changed
+ * @di:	  pointer to the ab8500_charger structure
+ * @psy:  pointer to the power_supply that has changed
+ *
+ */
+static void ab8500_power_supply_changed(struct ab8500_charger *di,
+					struct power_supply *psy)
+{
+	if (di->pdata->autopower_cfg) {
+		if (!di->usb.charger_connected &&
+		    !di->ac.charger_connected &&
+		    di->autopower) {
+			di->autopower = false;
+			ab8500_power_loss_handling(di);
+		} else if (!di->autopower &&
+			   (di->ac.charger_connected ||
+			    di->usb.charger_connected)) {
+			di->autopower = true;
+			ab8500_power_loss_handling(di);
+		}
+	}
+	power_supply_changed(psy);
+}
+
+static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
+	bool connected)
+{
+	if (connected != di->usb.charger_connected) {
+		dev_dbg(di->dev, "USB connected:%i\n", connected);
+		di->usb.charger_connected = connected;
+		sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
+	}
+}
+
+/**
+ * ab8500_charger_get_ac_voltage() - get ac charger voltage
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Returns ac charger voltage (on success)
+ */
+static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
+{
+	int vch;
+
+	/* Only measure voltage if the charger is connected */
+	if (di->ac.charger_connected) {
+		vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
+		if (vch < 0)
+			dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+	} else {
+		vch = 0;
+	}
+	return vch;
+}
+
+/**
+ * ab8500_charger_ac_cv() - check if the main charger is in CV mode
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Returns ac charger CV mode (on success) else error code
+ */
+static int ab8500_charger_ac_cv(struct ab8500_charger *di)
+{
+	u8 val;
+	int ret = 0;
+
+	/* Only check CV mode if the charger is online */
+	if (di->ac.charger_online) {
+		ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_CH_STATUS1_REG, &val);
+		if (ret < 0) {
+			dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+			return 0;
+		}
+
+		if (val & MAIN_CH_CV_ON)
+			ret = 1;
+		else
+			ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_get_vbus_voltage() - get vbus voltage
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * This function returns the vbus voltage.
+ * Returns vbus voltage (on success)
+ */
+static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
+{
+	int vch;
+
+	/* Only measure voltage if the charger is connected */
+	if (di->usb.charger_connected) {
+		vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
+		if (vch < 0)
+			dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+	} else {
+		vch = 0;
+	}
+	return vch;
+}
+
+/**
+ * ab8500_charger_get_usb_current() - get usb charger current
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * This function returns the usb charger current.
+ * Returns usb current (on success) and error code on failure
+ */
+static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
+{
+	int ich;
+
+	/* Only measure current if the charger is online */
+	if (di->usb.charger_online) {
+		ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
+		if (ich < 0)
+			dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+	} else {
+		ich = 0;
+	}
+	return ich;
+}
+
+/**
+ * ab8500_charger_get_ac_current() - get ac charger current
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * This function returns the ac charger current.
+ * Returns ac current (on success) and error code on failure.
+ */
+static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
+{
+	int ich;
+
+	/* Only measure current if the charger is online */
+	if (di->ac.charger_online) {
+		ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
+		if (ich < 0)
+			dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+	} else {
+		ich = 0;
+	}
+	return ich;
+}
+
+/**
+ * ab8500_charger_usb_cv() - check if the usb charger is in CV mode
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Returns ac charger CV mode (on success) else error code
+ */
+static int ab8500_charger_usb_cv(struct ab8500_charger *di)
+{
+	int ret;
+	u8 val;
+
+	/* Only check CV mode if the charger is online */
+	if (di->usb.charger_online) {
+		ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_CH_USBCH_STAT1_REG, &val);
+		if (ret < 0) {
+			dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+			return 0;
+		}
+
+		if (val & USB_CH_CV_ON)
+			ret = 1;
+		else
+			ret = 0;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_detect_chargers() - Detect the connected chargers
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Returns the type of charger connected.
+ * For USB this does not mean we can actually charge from it,
+ * only that there is a USB cable connected that we have to
+ * identify. This is used during startup when we don't get
+ * the charger detection interrupts.
+ *
+ * Returns an integer value, that means,
+ * NO_PW_CONN  no power supply is connected
+ * AC_PW_CONN  if the AC power supply is connected
+ * USB_PW_CONN  if the USB power supply is connected
+ * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
+ */
+static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
+{
+	int result = NO_PW_CONN;
+	int ret;
+	u8 val;
+
+	/* Check for AC charger */
+	ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_CH_STATUS1_REG, &val);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		return ret;
+	}
+
+	if (val & MAIN_CH_DET)
+		result = AC_PW_CONN;
+
+	/* Check for USB charger */
+	ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_CH_USBCH_STAT1_REG, &val);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		return ret;
+	}
+
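+	/* VBUS is present only when both debounced detect bits are set */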
+	if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
+		result |= USB_PW_CONN;
+
+	return result;
+}
+
+/**
+ * ab8500_charger_max_usb_curr() - get the max curr for the USB type
+ * @di:			pointer to the ab8500_charger structure
+ * @link_status:	the identified USB type
+ *
+ * Get the maximum current that is allowed to be drawn from the host
+ * based on the USB type.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
+	enum ab8500_charger_link_status link_status)
+{
+	int ret = 0;
+
+	switch (link_status) {
+	case USB_STAT_STD_HOST_NC:
+	case USB_STAT_STD_HOST_C_NS:
+	case USB_STAT_STD_HOST_C_S:
+		dev_dbg(di->dev, "USB Type - Standard host is "
+			"detected through USB driver\n");
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+		break;
+	case USB_STAT_HOST_CHG_HS_CHIRP:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+		break;
+	case USB_STAT_HOST_CHG_HS:
+	case USB_STAT_ACA_RID_C_HS:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+		break;
+	case USB_STAT_ACA_RID_A:
+		/*
+		 * Dedicated charger level minus maximum current accessory
+		 * can consume (300mA). Closest level is 1100mA
+		 */
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+		break;
+	case USB_STAT_ACA_RID_B:
+		/*
+		 * Dedicated charger level minus 120mA (20mA for ACA and
+		 * 100mA for potential accessory). Closest level is 1300mA
+		 */
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+		break;
+	case USB_STAT_DEDICATED_CHG:
+	case USB_STAT_HOST_CHG_NM:
+	case USB_STAT_ACA_RID_C_HS_CHIRP:
+	case USB_STAT_ACA_RID_C_NM:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+		break;
+	case USB_STAT_RESERVED:
+		/*
+		 * This state is used to indicate that VBUS has dropped below
+		 * the detection level 4 times in a row. This is caused by the
+		 * charger output current being set too high, making the
+		 * charger voltage collapse. This has to be propagated through
+		 * to chargalg. This is done using the property
+		 * POWER_SUPPLY_PROP_CURRENT_AVG = 1
+		 */
+		di->flags.vbus_collapse = true;
+		dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
+			"VBUS has collapsed\n");
+		ret = -1;
+		break;
+	case USB_STAT_HM_IDGND:
+	case USB_STAT_NOT_CONFIGURED:
+	case USB_STAT_NOT_VALID_LINK:
+		dev_err(di->dev, "USB Type - Charging not allowed\n");
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+		ret = -ENXIO;
+		break;
+	default:
+		dev_err(di->dev, "USB Type - Unknown\n");
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+		ret = -ENXIO;
+		break;
+	}
+
+	dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d\n",
+		link_status, di->max_usb_in_curr);
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_read_usb_type() - read the type of usb connected
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
+{
+	int ret;
+	u8 val;
+
+	ret = abx500_get_register_interruptible(di->dev,
+		AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG, &val);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		return ret;
+	}
+	ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+		AB8500_USB_LINE_STAT_REG, &val);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		return ret;
+	}
+
+	/* Get the USB type from bits 3..6 of UsbLineStatus */
+	val = (val & AB8500_USB_LINK_STATUS) >> 3;
+	ret = ab8500_charger_max_usb_curr(di,
+		(enum ab8500_charger_link_status) val);
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_detect_usb_type() - get the type of usb connected
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
+{
+	int i, ret;
+	u8 val;
+
+	/*
+	 * On getting the VBUS rising edge detect interrupt there
+	 * is a 250ms delay after which the register UsbLineStatus
+	 * is filled with valid data.
+	 */
+	for (i = 0; i < 10; i++) {
+		msleep(250);
+		ret = abx500_get_register_interruptible(di->dev,
+			AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
+			&val);
+		if (ret < 0) {
+			dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+			return ret;
+		}
+		ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+			AB8500_USB_LINE_STAT_REG, &val);
+		if (ret < 0) {
+			dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+			return ret;
+		}
+		/*
+		 * Until the IT source register is read the UsbLineStatus
+		 * register is not updated, hence the IT source read above.
+		 * Revisit this.
+		 */
+
+		/* Get the USB type from bits 3..6 of UsbLineStatus */
+		val = (val & AB8500_USB_LINK_STATUS) >> 3;
+		if (val)
+			break;
+	}
+	ret = ab8500_charger_max_usb_curr(di,
+		(enum ab8500_charger_link_status) val);
+
+	return ret;
+}
+
+/*
+ * This array maps the raw hex value to charger voltage used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_voltage_map[] = {
+	3500,
+	3525,
+	3550,
+	3575,
+	3600,
+	3625,
+	3650,
+	3675,
+	3700,
+	3725,
+	3750,
+	3775,
+	3800,
+	3825,
+	3850,
+	3875,
+	3900,
+	3925,
+	3950,
+	3975,
+	4000,
+	4025,
+	4050,
+	4060,
+	4070,
+	4080,
+	4090,
+	4100,
+	4110,
+	4120,
+	4130,
+	4140,
+	4150,
+	4160,
+	4170,
+	4180,
+	4190,
+	4200,
+	4210,
+	4220,
+	4230,
+	4240,
+	4250,
+	4260,
+	4270,
+	4280,
+	4290,
+	4300,
+	4310,
+	4320,
+	4330,
+	4340,
+	4350,
+	4360,
+	4370,
+	4380,
+	4390,
+	4400,
+	4410,
+	4420,
+	4430,
+	4440,
+	4450,
+	4460,
+	4470,
+	4480,
+	4490,
+	4500,
+	4510,
+	4520,
+	4530,
+	4540,
+	4550,
+	4560,
+	4570,
+	4580,
+	4590,
+	4600,
+};
+
+/*
+ * This array maps the raw hex value to charger current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_current_map[] = {
+	100,
+	200,
+	300,
+	400,
+	500,
+	600,
+	700,
+	800,
+	900,
+	1000,
+	1100,
+	1200,
+	1300,
+	1400,
+	1500,
+};
+
+/*
+ * This array maps the raw hex value to VBUS input current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_vbus_in_curr_map[] = {
+	USB_CH_IP_CUR_LVL_0P05,
+	USB_CH_IP_CUR_LVL_0P09,
+	USB_CH_IP_CUR_LVL_0P19,
+	USB_CH_IP_CUR_LVL_0P29,
+	USB_CH_IP_CUR_LVL_0P38,
+	USB_CH_IP_CUR_LVL_0P45,
+	USB_CH_IP_CUR_LVL_0P5,
+	USB_CH_IP_CUR_LVL_0P6,
+	USB_CH_IP_CUR_LVL_0P7,
+	USB_CH_IP_CUR_LVL_0P8,
+	USB_CH_IP_CUR_LVL_0P9,
+	USB_CH_IP_CUR_LVL_1P0,
+	USB_CH_IP_CUR_LVL_1P1,
+	USB_CH_IP_CUR_LVL_1P3,
+	USB_CH_IP_CUR_LVL_1P4,
+	USB_CH_IP_CUR_LVL_1P5,
+};
+
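+/*
+ * Translate a requested charger voltage in mV to a ChVoltLevel register
+ * index. Voltages below the first table entry map to LOW_VOLT_REG, values
+ * in between map to the closest lower table entry and values above the
+ * table return -1.
+ */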
+static int ab8500_voltage_to_regval(int voltage)
+{
+	int i;
+
+	/* Special case for voltage below 3.5V */
+	if (voltage < ab8500_charger_voltage_map[0])
+		return LOW_VOLT_REG;
+
+	for (i = 1; i < ARRAY_SIZE(ab8500_charger_voltage_map); i++) {
+		if (voltage < ab8500_charger_voltage_map[i])
+			return i - 1;
+	}
+
+	/* If not last element, return error */
+	i = ARRAY_SIZE(ab8500_charger_voltage_map) - 1;
+	if (voltage == ab8500_charger_voltage_map[i])
+		return i;
+	else
+		return -1;
+}
+
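+/*
+ * Translate a charger output current in mA to a ChOutputCurrentLevel
+ * register index; currents below the first table entry map to index 0
+ * and currents above the table return -1.
+ */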
+static int ab8500_current_to_regval(int curr)
+{
+	int i;
+
+	if (curr < ab8500_charger_current_map[0])
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(ab8500_charger_current_map); i++) {
+		if (curr < ab8500_charger_current_map[i])
+			return i - 1;
+	}
+
+	/* If not last element, return error */
+	i = ARRAY_SIZE(ab8500_charger_current_map) - 1;
+	if (curr == ab8500_charger_current_map[i])
+		return i;
+	else
+		return -1;
+}
+
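+/*
+ * Translate a VBUS input current limit in mA to a USBChInputCurr register
+ * index, using the same scheme as the charger output current above.
+ */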
+static int ab8500_vbus_in_curr_to_regval(int curr)
+{
+	int i;
+
+	if (curr < ab8500_charger_vbus_in_curr_map[0])
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(ab8500_charger_vbus_in_curr_map); i++) {
+		if (curr < ab8500_charger_vbus_in_curr_map[i])
+			return i - 1;
+	}
+
+	/* If not last element, return error */
+	i = ARRAY_SIZE(ab8500_charger_vbus_in_curr_map) - 1;
+	if (curr == ab8500_charger_vbus_in_curr_map[i])
+		return i;
+	else
+		return -1;
+}
+
+/**
+ * ab8500_charger_get_usb_cur() - get usb current
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * The usb stack provides the maximum current that can be drawn from
+ * the standard usb host. This will be in mA.
+ * This function converts current in mA to a value that can be written
+ * to the register. Returns -1 if charging is not allowed
+ */
+static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
+{
+	switch (di->usb_state.usb_current) {
+	case 100:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+		break;
+	case 200:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+		break;
+	case 300:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+		break;
+	case 400:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+		break;
+	case 500:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+		break;
+	default:
+		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
+ * @di:		pointer to the ab8500_charger structure
+ * @ich_in:	charger input current limit
+ *
+ * Sets the current that can be drawn from the USB host
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
+		int ich_in)
+{
+	int ret;
+	int input_curr_index;
+	int min_value;
+
+	/* We should always use the lowest current limit */
+	min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+
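+	/*
+	 * Due to the ASIC bug handled in ab8500_charger_check_vbat_work, the
+	 * 100 mA and 500 mA levels are reduced one step when the battery
+	 * voltage is below VBAT_TRESH_IP_CUR_RED.
+	 */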
+	switch (min_value) {
+	case 100:
+		if (di->vbat < VBAT_TRESH_IP_CUR_RED)
+			min_value = USB_CH_IP_CUR_LVL_0P05;
+		break;
+	case 500:
+		if (di->vbat < VBAT_TRESH_IP_CUR_RED)
+			min_value = USB_CH_IP_CUR_LVL_0P45;
+		break;
+	default:
+		break;
+	}
+
+	input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
+	if (input_curr_index < 0) {
+		dev_err(di->dev, "VBUS input current limit too high\n");
+		return -ENXIO;
+	}
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_USBCH_IPT_CRNTLVL_REG,
+		input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
+	if (ret)
+		dev_err(di->dev, "%s write failed\n", __func__);
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_led_en() - turn on/off charging led
+ * @di:		pointer to the ab8500_charger structure
+ * @on:		flag to turn on/off the charging led
+ *
+ * Power ON/OFF charging LED indication
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_led_en(struct ab8500_charger *di, int on)
+{
+	int ret;
+
+	if (on) {
+		/* Power ON charging LED indicator, set LED current to 5mA */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_LED_INDICATOR_PWM_CTRL,
+			(LED_IND_CUR_5MA | LED_INDICATOR_PWM_ENA));
+		if (ret) {
+			dev_err(di->dev, "Power ON LED failed\n");
+			return ret;
+		}
+		/* LED indicator PWM duty cycle 252/256 */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_LED_INDICATOR_PWM_DUTY,
+			LED_INDICATOR_PWM_DUTY_252_256);
+		if (ret) {
+			dev_err(di->dev, "Set LED PWM duty cycle failed\n");
+			return ret;
+		}
+	} else {
+		/* Power off charging LED indicator */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_LED_INDICATOR_PWM_CTRL,
+			LED_INDICATOR_PWM_DIS);
+		if (ret) {
+			dev_err(di->dev, "Power-off LED failed\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_ac_en() - enable or disable ac charging
+ * @charger:	pointer to the ux500_charger structure
+ * @enable:	enable/disable flag
+ * @vset:	charging voltage
+ * @iset:	charging current
+ *
+ * Enable/Disable AC/Mains charging and turn the charging LED on or off
+ * accordingly.
+ */
+static int ab8500_charger_ac_en(struct ux500_charger *charger,
+	int enable, int vset, int iset)
+{
+	int ret;
+	int volt_index;
+	int curr_index;
+	int input_curr_index;
+	u8 overshoot = 0;
+
+	struct ab8500_charger *di = to_ab8500_charger_ac_device_info(charger);
+
+	if (enable) {
+		/* Check if AC is connected */
+		if (!di->ac.charger_connected) {
+			dev_err(di->dev, "AC charger not connected\n");
+			return -ENXIO;
+		}
+
+		/* Enable AC charging */
+		dev_dbg(di->dev, "Enable AC: %dmV %dmA\n", vset, iset);
+
+		/*
+		 * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
+		 * will be triggered every time we enable the VDD ADC supply.
+		 * This will turn off charging for a short while.
+		 * It can be avoided by having the supply on when
+		 * there is a charger enabled. Normally the VDD ADC supply
+		 * is enabled every time a GPADC conversion is triggered. We
+		 * will force it to be enabled from this driver to have the
+		 * GPADC module independent of the AB8500 chargers.
+		 */
+		if (!di->vddadc_en_ac) {
+			regulator_enable(di->regu);
+			di->vddadc_en_ac = true;
+		}
+
+		/* Check if the requested voltage or current is valid */
+		volt_index = ab8500_voltage_to_regval(vset);
+		curr_index = ab8500_current_to_regval(iset);
+		input_curr_index = ab8500_current_to_regval(
+			di->bat->chg_params->ac_curr_max);
+		if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
+			dev_err(di->dev,
+				"Charger voltage or current too high, "
+				"charging not started\n");
+			return -ENXIO;
+		}
+
+		/* ChVoltLevel: maximum battery charging voltage */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+		if (ret) {
+			dev_err(di->dev, "%s write failed\n", __func__);
+			return ret;
+		}
+		/* MainChInputCurr: current that can be drawn from the charger*/
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_MCH_IPT_CURLVL_REG,
+			input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
+		if (ret) {
+			dev_err(di->dev, "%s write failed\n", __func__);
+			return ret;
+		}
+		/* ChOutputCurentLevel: protected output current */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+		if (ret) {
+			dev_err(di->dev, "%s write failed\n", __func__);
+			return ret;
+		}
+
+		/* Check if VBAT overshoot control should be enabled */
+		if (!di->bat->enable_overshoot)
+			overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;
+
+		/* Enable Main Charger */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_MCH_CTRL1, MAIN_CH_ENA | overshoot);
+		if (ret) {
+			dev_err(di->dev, "%s write failed\n", __func__);
+			return ret;
+		}
+
+		/* Power on charging LED indication */
+		ret = ab8500_charger_led_en(di, true);
+		if (ret < 0)
+			dev_err(di->dev, "failed to enable LED\n");
+
+		di->ac.charger_online = 1;
+	} else {
+		/* Disable AC charging */
+		if (is_ab8500_1p1_or_earlier(di->parent)) {
+			/*
+			 * For ABB revision 1.0 and 1.1 there is a bug in the
+			 * watchdog logic. That means we have to continuously
+			 * kick the charger watchdog even when no charger is
+			 * connected. This is only valid once the AC charger
+			 * has been enabled. This is a bug that is not handled
+			 * by the algorithm and the watchdog has to be kicked
+			 * by the charger driver when the AC charger
+			 * is disabled.
+			 */
+			if (di->ac_conn) {
+				queue_delayed_work(di->charger_wq,
+					&di->kick_wd_work,
+					round_jiffies(WD_KICK_INTERVAL));
+			}
+
+			/*
+			 * We can't turn off charging completely
+			 * due to a bug in AB8500 cut1.
+			 * If we do, charging will not start again.
+			 * That is why we set the lowest voltage
+			 * and current possible
+			 */
+			ret = abx500_set_register_interruptible(di->dev,
+				AB8500_CHARGER,
+				AB8500_CH_VOLT_LVL_REG, CH_VOL_LVL_3P5);
+			if (ret) {
+				dev_err(di->dev,
+					"%s write failed\n", __func__);
+				return ret;
+			}
+
+			ret = abx500_set_register_interruptible(di->dev,
+				AB8500_CHARGER,
+				AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
+			if (ret) {
+				dev_err(di->dev,
+					"%s write failed\n", __func__);
+				return ret;
+			}
+		} else {
+			ret = abx500_set_register_interruptible(di->dev,
+				AB8500_CHARGER,
+				AB8500_MCH_CTRL1, 0);
+			if (ret) {
+				dev_err(di->dev,
+					"%s write failed\n", __func__);
+				return ret;
+			}
+		}
+
+		ret = ab8500_charger_led_en(di, false);
+		if (ret < 0)
+			dev_err(di->dev, "failed to disable LED\n");
+
+		di->ac.charger_online = 0;
+		di->ac.wd_expired = false;
+
+		/* Disable regulator if enabled */
+		if (di->vddadc_en_ac) {
+			regulator_disable(di->regu);
+			di->vddadc_en_ac = false;
+		}
+
+		dev_dbg(di->dev, "%s Disabled AC charging\n", __func__);
+	}
+	ab8500_power_supply_changed(di, &di->ac_chg.psy);
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_usb_en() - enable usb charging
+ * @charger:	pointer to the ux500_charger structure
+ * @enable:	enable/disable flag
+ * @vset:	charging voltage
+ * @ich_out:	charger output current
+ *
+ * Enable/Disable USB charging and turn the charging LED on/off accordingly.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_usb_en(struct ux500_charger *charger,
+	int enable, int vset, int ich_out)
+{
+	int ret;
+	int volt_index;
+	int curr_index;
+	u8 overshoot = 0;
+
+	struct ab8500_charger *di = to_ab8500_charger_usb_device_info(charger);
+
+	if (enable) {
+		/* Check if USB is connected */
+		if (!di->usb.charger_connected) {
+			dev_err(di->dev, "USB charger not connected\n");
+			return -ENXIO;
+		}
+
+		/*
+		 * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
+		 * will be triggered every time we enable the VDD ADC supply.
+		 * This will turn off charging for a short while.
+		 * It can be avoided by having the supply on when
+		 * there is a charger enabled. Normally the VDD ADC supply
+		 * is enabled every time a GPADC conversion is triggered. We
+		 * will force it to be enabled from this driver to have the
+		 * GPADC module independent of the AB8500 chargers.
+		 */
+		if (!di->vddadc_en_usb) {
+			regulator_enable(di->regu);
+			di->vddadc_en_usb = true;
+		}
+
+		/* Enable USB charging */
+		dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+
+		/* Check if the requested voltage or current is valid */
+		volt_index = ab8500_voltage_to_regval(vset);
+		curr_index = ab8500_current_to_regval(ich_out);
+		if (volt_index < 0 || curr_index < 0) {
+			dev_err(di->dev,
+				"Charger voltage or current too high, "
+				"charging not started\n");
+			return -ENXIO;
+		}
+
+		/* ChVoltLevel: max voltage up to which battery can be charged */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+		if (ret) {
+			dev_err(di->dev, "%s write failed\n", __func__);
+			return ret;
+		}
+		/* USBChInputCurr: current that can be drawn from the usb */
+		ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+		if (ret) {
+			dev_err(di->dev, "setting USBChInputCurr failed\n");
+			return ret;
+		}
+		/* ChOutputCurentLevel: protected output current */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+		if (ret) {
+			dev_err(di->dev, "%s write failed\n", __func__);
+			return ret;
+		}
+		/* Check if VBAT overshoot control should be enabled */
+		if (!di->bat->enable_overshoot)
+			overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
+
+		/* Enable USB Charger */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
+		if (ret) {
+			dev_err(di->dev, "%s write failed\n", __func__);
+			return ret;
+		}
+
+		/* If success power on charging LED indication */
+		ret = ab8500_charger_led_en(di, true);
+		if (ret < 0)
+			dev_err(di->dev, "failed to enable LED\n");
+
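+		/* Monitor vbat to keep the VBUS input current within spec */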
+		queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
+
+		di->usb.charger_online = 1;
+	} else {
+		/* Disable USB charging */
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_CHARGER,
+			AB8500_USBCH_CTRL1_REG, 0);
+		if (ret) {
+			dev_err(di->dev,
+				"%s write failed\n", __func__);
+			return ret;
+		}
+
+		ret = ab8500_charger_led_en(di, false);
+		if (ret < 0)
+			dev_err(di->dev, "failed to disable LED\n");
+
+		di->usb.charger_online = 0;
+		di->usb.wd_expired = false;
+
+		/* Disable regulator if enabled */
+		if (di->vddadc_en_usb) {
+			regulator_disable(di->regu);
+			di->vddadc_en_usb = false;
+		}
+
+		dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
+
+		/* Cancel any pending Vbat check work */
+		if (delayed_work_pending(&di->check_vbat_work))
+			cancel_delayed_work(&di->check_vbat_work);
+
+	}
+	ab8500_power_supply_changed(di, &di->usb_chg.psy);
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_watchdog_kick() - kick charger watchdog
+ * @charger:	pointer to the ux500_charger structure
+ *
+ * Kick charger watchdog
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_watchdog_kick(struct ux500_charger *charger)
+{
+	int ret;
+	struct ab8500_charger *di;
+
+	if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+		di = to_ab8500_charger_ac_device_info(charger);
+	else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+		di = to_ab8500_charger_usb_device_info(charger);
+	else
+		return -ENXIO;
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+	if (ret)
+		dev_err(di->dev, "Failed to kick WD!\n");
+
+	return ret;
+}
+
+/**
+ * ab8500_charger_update_charger_current() - update charger current
+ * @charger:	pointer to the ux500_charger structure
+ * @ich_out:	the new charger output current
+ *
+ * Update the charger output current for the specified charger
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
+		int ich_out)
+{
+	int ret;
+	int curr_index;
+	struct ab8500_charger *di;
+
+	if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+		di = to_ab8500_charger_ac_device_info(charger);
+	else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+		di = to_ab8500_charger_usb_device_info(charger);
+	else
+		return -ENXIO;
+
+	curr_index = ab8500_current_to_regval(ich_out);
+	if (curr_index < 0) {
+		dev_err(di->dev,
+			"Charger current too high, "
+			"charging not started\n");
+		return -ENXIO;
+	}
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+	if (ret) {
+		dev_err(di->dev, "%s write failed\n", __func__);
+		return ret;
+	}
+
+	/* Reset the main and usb drop input current measurement counter */
+	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+				AB8500_CHARGER_CTRL,
+				0x1);
+	if (ret) {
+		dev_err(di->dev, "%s write failed\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
+{
+	struct power_supply *psy;
+	struct power_supply *ext;
+	struct ab8500_charger *di;
+	union power_supply_propval ret;
+	int i, j;
+	bool psy_found = false;
+	struct ux500_charger *usb_chg;
+
+	usb_chg = (struct ux500_charger *)data;
+	psy = &usb_chg->psy;
+
+	di = to_ab8500_charger_usb_device_info(usb_chg);
+
+	ext = dev_get_drvdata(dev);
+
+	/* For all psy where the driver name appears in any supplied_to */
+	for (i = 0; i < ext->num_supplicants; i++) {
+		if (!strcmp(ext->supplied_to[i], psy->name))
+			psy_found = true;
+	}
+
+	if (!psy_found)
+		return 0;
+
+	/* Go through all properties for the psy */
+	for (j = 0; j < ext->num_properties; j++) {
+		enum power_supply_property prop;
+		prop = ext->properties[j];
+
+		if (ext->get_property(ext, prop, &ret))
+			continue;
+
+		switch (prop) {
+		case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				di->vbat = ret.intval / 1000;
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * ab8500_charger_check_vbat_work() - keep vbus current within spec
+ * @work:	pointer to the work_struct structure
+ *
+ * Due to an ASIC bug it is necessary to lower the input current to the vbus
+ * charger when charging at some specific current levels. This issue only
+ * applies below a certain battery voltage. This function makes sure that
+ * the allowed current limit isn't exceeded.
+ */
+static void ab8500_charger_check_vbat_work(struct work_struct *work)
+{
+	int t = 10;
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, check_vbat_work.work);
+
+	class_for_each_device(power_supply_class, NULL,
+		&di->usb_chg.psy, ab8500_charger_get_ext_psy_data);
+
+	/* First run old_vbat is 0. */
+	if (di->old_vbat == 0)
+		di->old_vbat = di->vbat;
+
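+	/* Only re-adjust the input current when vbat crosses the threshold */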
+	if (!((di->old_vbat <= VBAT_TRESH_IP_CUR_RED &&
+		di->vbat <= VBAT_TRESH_IP_CUR_RED) ||
+		(di->old_vbat > VBAT_TRESH_IP_CUR_RED &&
+		di->vbat > VBAT_TRESH_IP_CUR_RED))) {
+
+		dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d,"
+			" old: %d\n", di->max_usb_in_curr, di->vbat,
+			di->old_vbat);
+		ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+		power_supply_changed(&di->usb_chg.psy);
+	}
+
+	di->old_vbat = di->vbat;
+
+	/*
+	 * No need to check the battery voltage every second when not close to
+	 * the threshold.
+	 */
+	if (di->vbat < (VBAT_TRESH_IP_CUR_RED + 100) &&
+		(di->vbat > (VBAT_TRESH_IP_CUR_RED - 100)))
+			t = 1;
+
+	queue_delayed_work(di->charger_wq, &di->check_vbat_work, t * HZ);
+}
+
+/**
+ * ab8500_charger_check_hw_failure_work() - check main charger failure
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
+{
+	int ret;
+	u8 reg_value;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, check_hw_failure_work.work);
+
+	/* Check if the status bits for HW failure is still active */
+	if (di->flags.mainextchnotok) {
+		ret = abx500_get_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+		if (ret < 0) {
+			dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+			return;
+		}
+		if (!(reg_value & MAIN_CH_NOK)) {
+			di->flags.mainextchnotok = false;
+			ab8500_power_supply_changed(di, &di->ac_chg.psy);
+		}
+	}
+	if (di->flags.vbus_ovv) {
+		ret = abx500_get_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG,
+			&reg_value);
+		if (ret < 0) {
+			dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+			return;
+		}
+		if (!(reg_value & VBUS_OVV_TH)) {
+			di->flags.vbus_ovv = false;
+			ab8500_power_supply_changed(di, &di->usb_chg.psy);
+		}
+	}
+	/* If we still have a failure, schedule a new check */
+	if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+		queue_delayed_work(di->charger_wq,
+			&di->check_hw_failure_work, round_jiffies(HZ));
+	}
+}
+
+/**
+ * ab8500_charger_kick_watchdog_work() - kick the watchdog
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog.
+ *
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * valid once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog have to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+static void ab8500_charger_kick_watchdog_work(struct work_struct *work)
+{
+	int ret;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, kick_wd_work.work);
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+	if (ret)
+		dev_err(di->dev, "Failed to kick WD!\n");
+
+	/* Schedule a new watchdog kick */
+	queue_delayed_work(di->charger_wq,
+		&di->kick_wd_work, round_jiffies(WD_KICK_INTERVAL));
+}
+
+/**
+ * ab8500_charger_ac_work() - work to get and set main charger status
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_ac_work(struct work_struct *work)
+{
+	int ret;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, ac_work);
+
+	/*
+	 * Since we can't be sure that the events are received
+	 * synchronously, we have to check if the main charger is
+	 * connected by reading the status register.
+	 */
+	ret = ab8500_charger_detect_chargers(di);
+	if (ret < 0)
+		return;
+
+	if (ret & AC_PW_CONN) {
+		di->ac.charger_connected = 1;
+		di->ac_conn = true;
+	} else {
+		di->ac.charger_connected = 0;
+	}
+
+	ab8500_power_supply_changed(di, &di->ac_chg.psy);
+	sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
+}
+
+/**
+ * ab8500_charger_detect_usb_type_work() - work to detect USB type
+ * @work:	Pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
+{
+	int ret;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, detect_usb_type_work);
+
+	/*
+	 * Since we can't be sure that the events are received
+	 * synchronously, we have to check if a charger is
+	 * connected by reading the status register.
+	 */
+	ret = ab8500_charger_detect_chargers(di);
+	if (ret < 0)
+		return;
+
+	if (!(ret & USB_PW_CONN)) {
+		di->vbus_detected = 0;
+		ab8500_charger_set_usb_connected(di, false);
+		ab8500_power_supply_changed(di, &di->usb_chg.psy);
+	} else {
+		di->vbus_detected = 1;
+
+		if (is_ab8500_1p1_or_earlier(di->parent)) {
+			ret = ab8500_charger_detect_usb_type(di);
+			if (!ret) {
+				ab8500_charger_set_usb_connected(di, true);
+				ab8500_power_supply_changed(di,
+							    &di->usb_chg.psy);
+			}
+		} else {
+			/* For ABB cut2.0 and onwards we have an IRQ,
+			 * USB_LINK_STATUS that will be triggered when the USB
+			 * link status changes. The exception is USB connected
+			 * during startup. Then we don't get a
+			 * USB_LINK_STATUS IRQ
+			 */
+			if (di->vbus_detected_start) {
+				di->vbus_detected_start = false;
+				ret = ab8500_charger_detect_usb_type(di);
+				if (!ret) {
+					ab8500_charger_set_usb_connected(di,
+						true);
+					ab8500_power_supply_changed(di,
+						&di->usb_chg.psy);
+				}
+			}
+		}
+	}
+}
+
+/**
+ * ab8500_charger_usb_link_status_work() - work to detect USB type
+ * @work:	pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_usb_link_status_work(struct work_struct *work)
+{
+	int ret;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, usb_link_status_work);
+
+	/*
+	 * Since we can't be sure that the events are received
+	 * synchronously, we have to check if a charger is
+	 * connected by reading the status register.
+	 */
+	ret = ab8500_charger_detect_chargers(di);
+	if (ret < 0)
+		return;
+
+	if (!(ret & USB_PW_CONN)) {
+		di->vbus_detected = 0;
+		ab8500_charger_set_usb_connected(di, false);
+		ab8500_power_supply_changed(di, &di->usb_chg.psy);
+	} else {
+		di->vbus_detected = 1;
+		ret = ab8500_charger_read_usb_type(di);
+		if (!ret) {
+			/* Update maximum input current */
+			ret = ab8500_charger_set_vbus_in_curr(di,
+					di->max_usb_in_curr);
+			if (ret)
+				return;
+
+			ab8500_charger_set_usb_connected(di, true);
+			ab8500_power_supply_changed(di, &di->usb_chg.psy);
+		} else if (ret == -ENXIO) {
+			/* No valid charger type detected */
+			ab8500_charger_set_usb_connected(di, false);
+			ab8500_power_supply_changed(di, &di->usb_chg.psy);
+		}
+	}
+}
+
+static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
+{
+	int ret;
+	unsigned long flags;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, usb_state_changed_work);
+
+	if (!di->vbus_detected)
+		return;
+
+	spin_lock_irqsave(&di->usb_state.usb_lock, flags);
+	di->usb_state.usb_changed = false;
+	spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
+
+	/*
+	 * Wait for a while until updates are received from the usb stack
+	 * and negotiations are completed.
+	 */
+	msleep(250);
+
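+	/* Bail out if the USB state changed again while we were sleeping */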
+	if (di->usb_state.usb_changed)
+		return;
+
+	dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
+		__func__, di->usb_state.state, di->usb_state.usb_current);
+
+	switch (di->usb_state.state) {
+	case AB8500_BM_USB_STATE_RESET_HS:
+	case AB8500_BM_USB_STATE_RESET_FS:
+	case AB8500_BM_USB_STATE_SUSPEND:
+	case AB8500_BM_USB_STATE_MAX:
+		ab8500_charger_set_usb_connected(di, false);
+		ab8500_power_supply_changed(di, &di->usb_chg.psy);
+		break;
+
+	case AB8500_BM_USB_STATE_RESUME:
+		/*
+		 * when suspend->resume there should be delay
+		 * of 1sec for enabling charging
+		 */
+		msleep(1000);
+		/* Intentional fall through */
+	case AB8500_BM_USB_STATE_CONFIGURED:
+		/*
+		 * USB is configured, enable charging with the charging
+		 * input current obtained from USB driver
+		 */
+		if (!ab8500_charger_get_usb_cur(di)) {
+			/* Update maximum input current */
+			ret = ab8500_charger_set_vbus_in_curr(di,
+					di->max_usb_in_curr);
+			if (ret)
+				return;
+
+			ab8500_charger_set_usb_connected(di, true);
+			ab8500_power_supply_changed(di, &di->usb_chg.psy);
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/**
+ * ab8500_charger_check_usbchargernotok_work() - check USB chg not ok status
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB charger Not OK status
+ */
+static void ab8500_charger_check_usbchargernotok_work(struct work_struct *work)
+{
+	int ret;
+	u8 reg_value;
+	bool prev_status;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, check_usbchgnotok_work.work);
+
+	/* Check if the status bit for usbchargernotok is still active */
+	ret = abx500_get_register_interruptible(di->dev,
+		AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		return;
+	}
+	prev_status = di->flags.usbchargernotok;
+
+	if (reg_value & VBUS_CH_NOK) {
+		di->flags.usbchargernotok = true;
+		/* Check again in 1sec */
+		queue_delayed_work(di->charger_wq,
+			&di->check_usbchgnotok_work, HZ);
+	} else {
+		di->flags.usbchargernotok = false;
+		di->flags.vbus_collapse = false;
+	}
+
+	if (prev_status != di->flags.usbchargernotok)
+		ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_main_thermal_prot_work() - check main thermal status
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for checking the Main thermal prot status
+ */
+static void ab8500_charger_check_main_thermal_prot_work(
+	struct work_struct *work)
+{
+	int ret;
+	u8 reg_value;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, check_main_thermal_prot_work);
+
+	/* Check if the status bit for main_thermal_prot is still active */
+	ret = abx500_get_register_interruptible(di->dev,
+		AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		return;
+	}
+	if (reg_value & MAIN_CH_TH_PROT)
+		di->flags.main_thermal_prot = true;
+	else
+		di->flags.main_thermal_prot = false;
+
+	ab8500_power_supply_changed(di, &di->ac_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_usb_thermal_prot_work() - check usb thermal status
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB thermal prot status
+ */
+static void ab8500_charger_check_usb_thermal_prot_work(
+	struct work_struct *work)
+{
+	int ret;
+	u8 reg_value;
+
+	struct ab8500_charger *di = container_of(work,
+		struct ab8500_charger, check_usb_thermal_prot_work);
+
+	/* Check if the status bit for usb_thermal_prot is still active */
+	ret = abx500_get_register_interruptible(di->dev,
+		AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+	if (ret < 0) {
+		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+		return;
+	}
+	if (reg_value & USB_CH_TH_PROT)
+		di->flags.usb_thermal_prot = true;
+	else
+		di->flags.usb_thermal_prot = false;
+
+	ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_mainchunplugdet_handler() - main charger unplugged
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "Main charger unplugged\n");
+	queue_work(di->charger_wq, &di->ac_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchplugdet_handler() - main charger plugged
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "Main charger plugged\n");
+	queue_work(di->charger_wq, &di->ac_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainextchnotok_handler() - main charger not ok
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainextchnotok_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "Main charger not ok\n");
+	di->flags.mainextchnotok = true;
+	ab8500_power_supply_changed(di, &di->ac_chg.psy);
+
+	/* Schedule a new HW failure check */
+	queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotr_handler() - Die temp is above main charger
+ * thermal protection threshold
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotr_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev,
+		"Die temp above Main charger thermal protection threshold\n");
+	queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotf_handler() - Die temp is below main charger
+ * thermal protection threshold
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev,
+		"Die temp ok for Main charger thermal protection threshold\n");
+	queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetf_handler() - VBUS falling detected
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "VBUS falling detected\n");
+	queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetr_handler() - VBUS rising detected
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	di->vbus_detected = true;
+	dev_dbg(di->dev, "VBUS rising detected\n");
+	queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usblinkstatus_handler() - USB link status has changed
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usblinkstatus_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "USB link status changed\n");
+
+	queue_work(di->charger_wq, &di->usb_link_status_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotr_handler() - Die temp is above usb charger
+ * thermal protection threshold
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotr_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev,
+		"Die temp above USB charger thermal protection threshold\n");
+	queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotf_handler() - Die temp is below usb charger
+ * thermal protection threshold
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotf_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev,
+		"Die temp ok for USB charger thermal protection threshold\n");
+	queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchargernotokr_handler() - USB charger not ok detected
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchargernotokr_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "Not allowed USB charger detected\n");
+	queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_chwdexp_handler() - Charger watchdog expired
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "Charger watchdog expired\n");
+
+	/*
+	 * The charger that was online when the watchdog expired
+	 * needs to be restarted for charging to start again
+	 */
+	if (di->ac.charger_online) {
+		di->ac.wd_expired = true;
+		ab8500_power_supply_changed(di, &di->ac_chg.psy);
+	}
+	if (di->usb.charger_online) {
+		di->usb.wd_expired = true;
+		ab8500_power_supply_changed(di, &di->usb_chg.psy);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusovv_handler(int irq, void *_di)
+{
+	struct ab8500_charger *di = _di;
+
+	dev_dbg(di->dev, "VBUS overvoltage detected\n");
+	di->flags.vbus_ovv = true;
+	ab8500_power_supply_changed(di, &di->usb_chg.psy);
+
+	/* Schedule a new HW failure check */
+	queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_ac_get_property() - get the ac/mains properties
+ * @psy:       pointer to the power_supply structure
+ * @psp:       pointer to the power_supply_property structure
+ * @val:       pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the ac/mains
+ * properties by reading the sysfs files.
+ * AC/Mains properties are online, present and voltage.
+ * online:     ac/mains charging is in progress or not
+ * present:    presence of the ac/mains
+ * voltage:    AC/Mains voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_ac_get_property(struct power_supply *psy,
+	enum power_supply_property psp,
+	union power_supply_propval *val)
+{
+	struct ab8500_charger *di;
+
+	di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_HEALTH:
+		if (di->flags.mainextchnotok)
+			val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+		else if (di->ac.wd_expired || di->usb.wd_expired)
+			val->intval = POWER_SUPPLY_HEALTH_DEAD;
+		else if (di->flags.main_thermal_prot)
+			val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+		else
+			val->intval = POWER_SUPPLY_HEALTH_GOOD;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = di->ac.charger_online;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = di->ac.charger_connected;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di);
+		val->intval = di->ac.charger_voltage * 1000;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+		/*
+		 * This property is used to indicate when CV mode is entered
+		 * for the AC charger
+		 */
+		di->ac.cv_active = ab8500_charger_ac_cv(di);
+		val->intval = di->ac.cv_active;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = ab8500_charger_get_ac_current(di) * 1000;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * ab8500_charger_usb_get_property() - get the usb properties
+ * @psy:        pointer to the power_supply structure
+ * @psp:        pointer to the power_supply_property structure
+ * @val:        pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the usb
+ * properties by reading the sysfs files.
+ * USB properties are online, present and voltage.
+ * online:     usb charging is in progress or not
+ * present:    presence of the usb
+ * voltage:    vbus voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_usb_get_property(struct power_supply *psy,
+	enum power_supply_property psp,
+	union power_supply_propval *val)
+{
+	struct ab8500_charger *di;
+
+	di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_HEALTH:
+		if (di->flags.usbchargernotok)
+			val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+		else if (di->ac.wd_expired || di->usb.wd_expired)
+			val->intval = POWER_SUPPLY_HEALTH_DEAD;
+		else if (di->flags.usb_thermal_prot)
+			val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+		else if (di->flags.vbus_ovv)
+			val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+		else
+			val->intval = POWER_SUPPLY_HEALTH_GOOD;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = di->usb.charger_online;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = di->usb.charger_connected;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di);
+		val->intval = di->usb.charger_voltage * 1000;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+		/*
+		 * This property is used to indicate when CV mode is entered
+		 * for the USB charger
+		 */
+		di->usb.cv_active = ab8500_charger_usb_cv(di);
+		val->intval = di->usb.cv_active;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = ab8500_charger_get_usb_current(di) * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_AVG:
+		/*
+		 * This property is used to indicate when VBUS has collapsed
+		 * due to too high output current from the USB charger
+		 */
+		if (di->flags.vbus_collapse)
+			val->intval = 1;
+		else
+			val->intval = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * ab8500_charger_init_hw_registers() - Set up charger related registers
+ * @di:		pointer to the ab8500_charger structure
+ *
+ * Set up charger OVV, watchdog and maximum voltage registers as well as
+ * charging of the backup battery
+ */
+static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
+{
+	int ret = 0;
+
+	/* Setup maximum charger current and voltage for ABB cut2.0 */
+	if (!is_ab8500_1p1_or_earlier(di->parent)) {
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_CHARGER,
+			AB8500_CH_VOLT_LVL_MAX_REG, CH_VOL_LVL_4P6);
+		if (ret) {
+			dev_err(di->dev,
+				"failed to set CH_VOLT_LVL_MAX_REG\n");
+			goto out;
+		}
+
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_CHARGER,
+			AB8500_CH_OPT_CRNTLVL_MAX_REG, CH_OP_CUR_LVL_1P6);
+		if (ret) {
+			dev_err(di->dev,
+				"failed to set CH_OPT_CRNTLVL_MAX_REG\n");
+			goto out;
+		}
+	}
+
+	/* VBUS OVV set to 6.3V and enable automatic current limitation */
+	ret = abx500_set_register_interruptible(di->dev,
+		AB8500_CHARGER,
+		AB8500_USBCH_CTRL2_REG,
+		VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
+	if (ret) {
+		dev_err(di->dev, "failed to set VBUS OVV\n");
+		goto out;
+	}
+
+	/* Enable main watchdog in OTP */
+	ret = abx500_set_register_interruptible(di->dev,
+		AB8500_OTP_EMUL, AB8500_OTP_CONF_15, OTP_ENABLE_WD);
+	if (ret) {
+		dev_err(di->dev, "failed to enable main WD in OTP\n");
+		goto out;
+	}
+
+	/* Enable main watchdog */
+	ret = abx500_set_register_interruptible(di->dev,
+		AB8500_SYS_CTRL2_BLOCK,
+		AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_ENA);
+	if (ret) {
+		dev_err(di->dev, "faile to enable main watchdog\n");
+		goto out;
+	}
+
+	/*
+	 * Due to internal synchronisation, Enable and Kick watchdog bits
+	 * cannot be enabled in a single write.
+	 * A minimum delay of 2*32 kHz period (62.5µs) must be inserted
+	 * between writing Enable then Kick bits.
+	 */
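+	/* 63 us is the 62.5 us minimum rounded up to whole microseconds */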
+	udelay(63);
+
+	/* Kick main watchdog */
+	ret = abx500_set_register_interruptible(di->dev,
+		AB8500_SYS_CTRL2_BLOCK,
+		AB8500_MAIN_WDOG_CTRL_REG,
+		(MAIN_WDOG_ENA | MAIN_WDOG_KICK));
+	if (ret) {
+		dev_err(di->dev, "failed to kick main watchdog\n");
+		goto out;
+	}
+
+	/* Disable main watchdog */
+	ret = abx500_set_register_interruptible(di->dev,
+		AB8500_SYS_CTRL2_BLOCK,
+		AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_DIS);
+	if (ret) {
+		dev_err(di->dev, "failed to disable main watchdog\n");
+		goto out;
+	}
+
+	/* Set watchdog timeout */
+	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+		AB8500_CH_WD_TIMER_REG, WD_TIMER);
+	if (ret) {
+		dev_err(di->dev, "failed to set charger watchdog timeout\n");
+		goto out;
+	}
+
+	/* Backup battery voltage and current */
+	ret = abx500_set_register_interruptible(di->dev,
+		AB8500_RTC,
+		AB8500_RTC_BACKUP_CHG_REG,
+		di->bat->bkup_bat_v |
+		di->bat->bkup_bat_i);
+	if (ret) {
+		dev_err(di->dev, "failed to setup backup battery charging\n");
+		goto out;
+	}
+
+	/* Enable backup battery charging */
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+		AB8500_RTC, AB8500_RTC_CTRL_REG,
+		RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
+	if (ret < 0)
+		dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+out:
+	return ret;
+}
+
+/*
+ * ab8500 charger driver interrupts and their respective isr
+ */
+static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
+	{"MAIN_CH_UNPLUG_DET", ab8500_charger_mainchunplugdet_handler},
+	{"MAIN_CHARGE_PLUG_DET", ab8500_charger_mainchplugdet_handler},
+	{"MAIN_EXT_CH_NOT_OK", ab8500_charger_mainextchnotok_handler},
+	{"MAIN_CH_TH_PROT_R", ab8500_charger_mainchthprotr_handler},
+	{"MAIN_CH_TH_PROT_F", ab8500_charger_mainchthprotf_handler},
+	{"VBUS_DET_F", ab8500_charger_vbusdetf_handler},
+	{"VBUS_DET_R", ab8500_charger_vbusdetr_handler},
+	{"USB_LINK_STATUS", ab8500_charger_usblinkstatus_handler},
+	{"USB_CH_TH_PROT_R", ab8500_charger_usbchthprotr_handler},
+	{"USB_CH_TH_PROT_F", ab8500_charger_usbchthprotf_handler},
+	{"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
+	{"VBUS_OVV", ab8500_charger_vbusovv_handler},
+	{"CH_WD_EXP", ab8500_charger_chwdexp_handler},
+};
+
+static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
+		unsigned long event, void *power)
+{
+	struct ab8500_charger *di =
+		container_of(nb, struct ab8500_charger, nb);
+	enum ab8500_usb_state bm_usb_state;
+	unsigned mA = *((unsigned *)power);
+
+	if (event != USB_EVENT_VBUS) {
+		dev_dbg(di->dev, "not a standard host, returning\n");
+		return NOTIFY_DONE;
+	}
+
+	/*
+	 * TODO: The state is fabricated here. See if the charger really
+	 * needs the USB state or if mA is enough.
+	 */
+	if ((di->usb_state.usb_current == 2) && (mA > 2))
+		bm_usb_state = AB8500_BM_USB_STATE_RESUME;
+	else if (mA == 0)
+		bm_usb_state = AB8500_BM_USB_STATE_RESET_HS;
+	else if (mA == 2)
+		bm_usb_state = AB8500_BM_USB_STATE_SUSPEND;
+	else if (mA >= 8) /* 8, 100, 500 */
+		bm_usb_state = AB8500_BM_USB_STATE_CONFIGURED;
+	else /* Should never occur */
+		bm_usb_state = AB8500_BM_USB_STATE_RESET_FS;
+
+	dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
+		__func__, bm_usb_state, mA);
+
+	spin_lock(&di->usb_state.usb_lock);
+	di->usb_state.usb_changed = true;
+	spin_unlock(&di->usb_state.usb_lock);
+
+	di->usb_state.state = bm_usb_state;
+	di->usb_state.usb_current = mA;
+
+	queue_work(di->charger_wq, &di->usb_state_changed_work);
+
+	return NOTIFY_OK;
+}
+
+#if defined(CONFIG_PM)
+static int ab8500_charger_resume(struct platform_device *pdev)
+{
+	int ret;
+	struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+	/*
+	 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+	 * logic. That means we have to continuously kick the charger
+	 * watchdog even when no charger is connected. This is only
+	 * valid once the AC charger has been enabled. This is
+	 * a bug that is not handled by the algorithm and the
+	 * watchdog has to be kicked by the charger driver
+	 * when the AC charger is disabled.
+	 */
+	if (di->ac_conn && is_ab8500_1p1_or_earlier(di->parent)) {
+		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+			AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+		if (ret)
+			dev_err(di->dev, "Failed to kick WD!\n");
+
+		/* If not already pending, start a new timer */
+		if (!delayed_work_pending(&di->kick_wd_work)) {
+			queue_delayed_work(di->charger_wq, &di->kick_wd_work,
+				round_jiffies(WD_KICK_INTERVAL));
+		}
+	}
+
+	/* If we still have a HW failure, schedule a new check */
+	if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+		queue_delayed_work(di->charger_wq,
+			&di->check_hw_failure_work, 0);
+	}
+
+	return 0;
+}
+
+static int ab8500_charger_suspend(struct platform_device *pdev,
+	pm_message_t state)
+{
+	struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+	/* Cancel any pending HW failure check */
+	if (delayed_work_pending(&di->check_hw_failure_work))
+		cancel_delayed_work(&di->check_hw_failure_work);
+
+	return 0;
+}
+#else
+#define ab8500_charger_suspend      NULL
+#define ab8500_charger_resume       NULL
+#endif
+
+static int __devexit ab8500_charger_remove(struct platform_device *pdev)
+{
+	struct ab8500_charger *di = platform_get_drvdata(pdev);
+	int i, irq, ret;
+
+	/* Disable AC charging */
+	ab8500_charger_ac_en(&di->ac_chg, false, 0, 0);
+
+	/* Disable USB charging */
+	ab8500_charger_usb_en(&di->usb_chg, false, 0, 0);
+
+	/* Disable interrupts */
+	for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+		irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+		free_irq(irq, di);
+	}
+
+	/* disable the regulator */
+	regulator_put(di->regu);
+
+	/* Backup battery voltage and current disable */
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+		AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
+	if (ret < 0)
+		dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+	usb_unregister_notifier(di->usb_phy, &di->nb);
+	usb_put_transceiver(di->usb_phy);
+
+	/* Delete the work queue */
+	destroy_workqueue(di->charger_wq);
+
+	flush_scheduled_work();
+	power_supply_unregister(&di->usb_chg.psy);
+	power_supply_unregister(&di->ac_chg.psy);
+	platform_set_drvdata(pdev, NULL);
+	kfree(di);
+
+	return 0;
+}
+
+static int __devinit ab8500_charger_probe(struct platform_device *pdev)
+{
+	int irq, i, charger_status, ret = 0;
+	struct abx500_bm_plat_data *plat_data;
+
+	struct ab8500_charger *di =
+		kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+	if (!di)
+		return -ENOMEM;
+
+	/* get parent data */
+	di->dev = &pdev->dev;
+	di->parent = dev_get_drvdata(pdev->dev.parent);
+	di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+	/* initialize lock */
+	spin_lock_init(&di->usb_state.usb_lock);
+
+	/* get charger specific platform data */
+	plat_data = pdev->dev.platform_data;
+	di->pdata = plat_data->charger;
+
+	if (!di->pdata) {
+		dev_err(di->dev, "no charger platform data supplied\n");
+		ret = -EINVAL;
+		goto free_device_info;
+	}
+
+	/* get battery specific platform data */
+	di->bat = plat_data->battery;
+	if (!di->bat) {
+		dev_err(di->dev, "no battery platform data supplied\n");
+		ret = -EINVAL;
+		goto free_device_info;
+	}
+
+	di->autopower = false;
+
+	/* AC supply */
+	/* power_supply base class */
+	di->ac_chg.psy.name = "ab8500_ac";
+	di->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
+	di->ac_chg.psy.properties = ab8500_charger_ac_props;
+	di->ac_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_ac_props);
+	di->ac_chg.psy.get_property = ab8500_charger_ac_get_property;
+	di->ac_chg.psy.supplied_to = di->pdata->supplied_to;
+	di->ac_chg.psy.num_supplicants = di->pdata->num_supplicants;
+	/* ux500_charger sub-class */
+	di->ac_chg.ops.enable = &ab8500_charger_ac_en;
+	di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+	di->ac_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+	di->ac_chg.max_out_volt = ab8500_charger_voltage_map[
+		ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+	di->ac_chg.max_out_curr = ab8500_charger_current_map[
+		ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+	/* USB supply */
+	/* power_supply base class */
+	di->usb_chg.psy.name = "ab8500_usb";
+	di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
+	di->usb_chg.psy.properties = ab8500_charger_usb_props;
+	di->usb_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_usb_props);
+	di->usb_chg.psy.get_property = ab8500_charger_usb_get_property;
+	di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
+	di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+	/* ux500_charger sub-class */
+	di->usb_chg.ops.enable = &ab8500_charger_usb_en;
+	di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+	di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+	di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
+		ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+	di->usb_chg.max_out_curr = ab8500_charger_current_map[
+		ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+	/* Create a work queue for the charger */
+	di->charger_wq =
+		create_singlethread_workqueue("ab8500_charger_wq");
+	if (di->charger_wq == NULL) {
+		dev_err(di->dev, "failed to create work queue\n");
+		ret = -ENOMEM;
+		goto free_device_info;
+	}
+
+	/* Init work for HW failure check */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+		ab8500_charger_check_hw_failure_work);
+	INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+		ab8500_charger_check_usbchargernotok_work);
+
+	/*
+	 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+	 * logic. That means we have to continuously kick the charger
+	 * watchdog even when no charger is connected. This is only
+	 * valid once the AC charger has been enabled. This is
+	 * a bug that is not handled by the algorithm and the
+	 * watchdog has to be kicked by the charger driver
+	 * when the AC charger is disabled.
+	 */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+		ab8500_charger_kick_watchdog_work);
+
+	INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+		ab8500_charger_check_vbat_work);
+
+	/* Init work for charger detection */
+	INIT_WORK(&di->usb_link_status_work,
+		ab8500_charger_usb_link_status_work);
+	INIT_WORK(&di->ac_work, ab8500_charger_ac_work);
+	INIT_WORK(&di->detect_usb_type_work,
+		ab8500_charger_detect_usb_type_work);
+
+	INIT_WORK(&di->usb_state_changed_work,
+		ab8500_charger_usb_state_changed_work);
+
+	/* Init work for checking HW status */
+	INIT_WORK(&di->check_main_thermal_prot_work,
+		ab8500_charger_check_main_thermal_prot_work);
+	INIT_WORK(&di->check_usb_thermal_prot_work,
+		ab8500_charger_check_usb_thermal_prot_work);
+
+	/*
+	 * VDD ADC supply needs to be enabled from this driver when there
+	 * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
+	 * interrupts during charging
+	 */
+	di->regu = regulator_get(di->dev, "vddadc");
+	if (IS_ERR(di->regu)) {
+		ret = PTR_ERR(di->regu);
+		dev_err(di->dev, "failed to get vddadc regulator\n");
+		goto free_charger_wq;
+	}
+
+	/* Initialize OVV, and other registers */
+	ret = ab8500_charger_init_hw_registers(di);
+	if (ret) {
+		dev_err(di->dev, "failed to initialize ABB registers\n");
+		goto free_regulator;
+	}
+
+	/* Register AC charger class */
+	ret = power_supply_register(di->dev, &di->ac_chg.psy);
+	if (ret) {
+		dev_err(di->dev, "failed to register AC charger\n");
+		goto free_regulator;
+	}
+
+	/* Register USB charger class */
+	ret = power_supply_register(di->dev, &di->usb_chg.psy);
+	if (ret) {
+		dev_err(di->dev, "failed to register USB charger\n");
+		goto free_ac;
+	}
+
+	di->usb_phy = usb_get_transceiver();
+	if (!di->usb_phy) {
+		dev_err(di->dev, "failed to get usb transceiver\n");
+		ret = -EINVAL;
+		goto free_usb;
+	}
+	di->nb.notifier_call = ab8500_charger_usb_notifier_call;
+	ret = usb_register_notifier(di->usb_phy, &di->nb);
+	if (ret) {
+		dev_err(di->dev, "failed to register usb notifier\n");
+		goto put_usb_phy;
+	}
+
+	/* Identify the connected charger types during startup */
+	charger_status = ab8500_charger_detect_chargers(di);
+	if (charger_status & AC_PW_CONN) {
+		di->ac.charger_connected = 1;
+		di->ac_conn = true;
+		ab8500_power_supply_changed(di, &di->ac_chg.psy);
+		sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
+	}
+
+	if (charger_status & USB_PW_CONN) {
+		dev_dbg(di->dev, "VBUS Detect during startup\n");
+		di->vbus_detected = true;
+		di->vbus_detected_start = true;
+		queue_work(di->charger_wq,
+			&di->detect_usb_type_work);
+	}
+
+	/* Register interrupts */
+	for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+		irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+		ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
+			IRQF_SHARED | IRQF_NO_SUSPEND,
+			ab8500_charger_irq[i].name, di);
+
+		if (ret != 0) {
+			dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+				, ab8500_charger_irq[i].name, irq, ret);
+			goto free_irq;
+		}
+		dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+			ab8500_charger_irq[i].name, irq, ret);
+	}
+
+	platform_set_drvdata(pdev, di);
+
+	return ret;
+
+free_irq:
+	usb_unregister_notifier(di->usb_phy, &di->nb);
+
+	/* We also have to free all successfully registered irqs */
+	for (i = i - 1; i >= 0; i--) {
+		irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+		free_irq(irq, di);
+	}
+put_usb_phy:
+	usb_put_transceiver(di->usb_phy);
+free_usb:
+	power_supply_unregister(&di->usb_chg.psy);
+free_ac:
+	power_supply_unregister(&di->ac_chg.psy);
+free_regulator:
+	regulator_put(di->regu);
+free_charger_wq:
+	destroy_workqueue(di->charger_wq);
+free_device_info:
+	kfree(di);
+
+	return ret;
+}
+
+static struct platform_driver ab8500_charger_driver = {
+	.probe = ab8500_charger_probe,
+	.remove = __devexit_p(ab8500_charger_remove),
+	.suspend = ab8500_charger_suspend,
+	.resume = ab8500_charger_resume,
+	.driver = {
+		.name = "ab8500-charger",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init ab8500_charger_init(void)
+{
+	return platform_driver_register(&ab8500_charger_driver);
+}
+
+static void __exit ab8500_charger_exit(void)
+{
+	platform_driver_unregister(&ab8500_charger_driver);
+}
+
+subsys_initcall_sync(ab8500_charger_init);
+module_exit(ab8500_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-charger");
+MODULE_DESCRIPTION("AB8500 charger management driver");
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
new file mode 100644
index 0000000..c22f2f0
--- /dev/null
+++ b/drivers/power/ab8500_fg.c
@@ -0,0 +1,2637 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2012
+ *
+ * Main and Back-up battery management driver.
+ *
+ * Note: Backup battery management is required for Li-Ion backup batteries
+ * but not for capacitive ones. HREF boards have a capacitive backup battery,
+ * so backup battery management is not used there, although the supporting
+ * code is available in this driver.
+ *
+ * License Terms: GNU General Public License v2
+ * Author:
+ *	Johan Palsson <johan.palsson@stericsson.com>
+ *	Karl Komierowski <karl.komierowski@stericsson.com>
+ *	Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/time.h>
+#include <linux/completion.h>
+
+#define MILLI_TO_MICRO			1000
+#define FG_LSB_IN_MA			1627
+#define QLSB_NANO_AMP_HOURS_X10		1129
+#define INS_CURR_TIMEOUT		(3 * HZ)
+
+#define SEC_TO_SAMPLE(S)		(S * 4)
+
+#define NBR_AVG_SAMPLES			20
+
+#define LOW_BAT_CHECK_INTERVAL		(2 * HZ)
+
+#define VALID_CAPACITY_SEC		(45 * 60) /* 45 minutes */
+#define BATT_OK_MIN			2360 /* mV */
+#define BATT_OK_INCREMENT		50 /* mV */
+#define BATT_OK_MAX_NR_INCREMENTS	0xE
+
+/* FG constants */
+#define BATT_OVV			0x01
+
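+/*
+ * Linear interpolation between the points (x1, y1) and (x2, y2), evaluated
+ * at x. For example, interpolate(5, 0, 0, 10, 100) evaluates to 50.
+ */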
+#define interpolate(x, x1, y1, x2, y2) \
+	((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))
+
+#define to_ab8500_fg_device_info(x) container_of((x), \
+	struct ab8500_fg, fg_psy)
+
+/**
+ * struct ab8500_fg_interrupts - ab8500 fg interrupts
+ * @name:	name of the interrupt
+ * @isr:	function pointer to the isr
+ */
+struct ab8500_fg_interrupts {
+	char *name;
+	irqreturn_t (*isr)(int irq, void *data);
+};
+
+enum ab8500_fg_discharge_state {
+	AB8500_FG_DISCHARGE_INIT,
+	AB8500_FG_DISCHARGE_INITMEASURING,
+	AB8500_FG_DISCHARGE_INIT_RECOVERY,
+	AB8500_FG_DISCHARGE_RECOVERY,
+	AB8500_FG_DISCHARGE_READOUT_INIT,
+	AB8500_FG_DISCHARGE_READOUT,
+	AB8500_FG_DISCHARGE_WAKEUP,
+};
+
+static char *discharge_state[] = {
+	"DISCHARGE_INIT",
+	"DISCHARGE_INITMEASURING",
+	"DISCHARGE_INIT_RECOVERY",
+	"DISCHARGE_RECOVERY",
+	"DISCHARGE_READOUT_INIT",
+	"DISCHARGE_READOUT",
+	"DISCHARGE_WAKEUP",
+};
+
+enum ab8500_fg_charge_state {
+	AB8500_FG_CHARGE_INIT,
+	AB8500_FG_CHARGE_READOUT,
+};
+
+static char *charge_state[] = {
+	"CHARGE_INIT",
+	"CHARGE_READOUT",
+};
+
+enum ab8500_fg_calibration_state {
+	AB8500_FG_CALIB_INIT,
+	AB8500_FG_CALIB_WAIT,
+	AB8500_FG_CALIB_END,
+};
+
+struct ab8500_fg_avg_cap {
+	int avg;
+	int samples[NBR_AVG_SAMPLES];
+	__kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+	int pos;
+	int nbr_samples;
+	int sum;
+};
+
+struct ab8500_fg_battery_capacity {
+	int max_mah_design;
+	int max_mah;
+	int mah;
+	int permille;
+	int level;
+	int prev_mah;
+	int prev_percent;
+	int prev_level;
+	int user_mah;
+};
+
+struct ab8500_fg_flags {
+	bool fg_enabled;
+	bool conv_done;
+	bool charging;
+	bool fully_charged;
+	bool force_full;
+	bool low_bat_delay;
+	bool low_bat;
+	bool bat_ovv;
+	bool batt_unknown;
+	bool calibrate;
+	bool user_cap;
+	bool batt_id_received;
+};
+
+struct inst_curr_result_list {
+	struct list_head list;
+	int *result;
+};
+
+/**
+ * struct ab8500_fg - ab8500 FG device information
+ * @dev:		Pointer to the structure device
+ * @node:		a list of AB8500 FGs, hence prepared for reentrance
+ * @irq:		holds the CCEOC interrupt number
+ * @vbat:		Battery voltage in mV
+ * @vbat_nom:		Nominal battery voltage in mV
+ * @inst_curr:		Instantaneous battery current in mA
+ * @avg_curr:		Average battery current in mA
+ * @bat_temp:		battery temperature
+ * @fg_samples:		Number of samples used in the FG accumulation
+ * @accu_charge:	Accumulated charge from the last conversion
+ * @recovery_cnt:	Counter for recovery mode
+ * @high_curr_cnt:	Counter for high current mode
+ * @init_cnt:		Counter for init mode
+ * @recovery_needed:	Indicate if recovery is needed
+ * @high_curr_mode:	Indicate if we're in high current mode
+ * @init_capacity:	Indicate if initial capacity measuring should be done
+ * @turn_off_fg:	True if fg was off before current measurement
+ * @calib_state:	State during offset calibration
+ * @discharge_state:	Current discharge state
+ * @charge_state:	Current charge state
+ * @ab8500_fg_complete:	Completion struct used for the instant current reading
+ * @flags:		Structure for information about events triggered
+ * @bat_cap:		Structure for battery capacity specific parameters
+ * @avg_cap:		Average capacity filter
+ * @parent:		Pointer to the struct ab8500
+ * @gpadc:		Pointer to the struct gpadc
+ * @pdata:		Pointer to the abx500_fg platform data
+ * @bat:		Pointer to the abx500_bm platform data
+ * @fg_psy:		Structure that holds the FG specific battery properties
+ * @fg_wq:		Work queue for running the FG algorithm
+ * @fg_periodic_work:	Work to run the FG algorithm periodically
+ * @fg_low_bat_work:	Work to check low bat condition
+ * @fg_reinit_work:	Work used to reset and reinitialise the FG algorithm
+ * @fg_work:		Work to run the FG algorithm instantly
+ * @fg_acc_cur_work:	Work to read the FG accumulator
+ * @fg_check_hw_failure_work:	Work for checking HW state
+ * @cc_lock:		Mutex for locking the CC
+ * @fg_kobject:		Structure of type kobject
+ */
+struct ab8500_fg {
+	struct device *dev;
+	struct list_head node;
+	int irq;
+	int vbat;
+	int vbat_nom;
+	int inst_curr;
+	int avg_curr;
+	int bat_temp;
+	int fg_samples;
+	int accu_charge;
+	int recovery_cnt;
+	int high_curr_cnt;
+	int init_cnt;
+	bool recovery_needed;
+	bool high_curr_mode;
+	bool init_capacity;
+	bool turn_off_fg;
+	enum ab8500_fg_calibration_state calib_state;
+	enum ab8500_fg_discharge_state discharge_state;
+	enum ab8500_fg_charge_state charge_state;
+	struct completion ab8500_fg_complete;
+	struct ab8500_fg_flags flags;
+	struct ab8500_fg_battery_capacity bat_cap;
+	struct ab8500_fg_avg_cap avg_cap;
+	struct ab8500 *parent;
+	struct ab8500_gpadc *gpadc;
+	struct abx500_fg_platform_data *pdata;
+	struct abx500_bm_data *bat;
+	struct power_supply fg_psy;
+	struct workqueue_struct *fg_wq;
+	struct delayed_work fg_periodic_work;
+	struct delayed_work fg_low_bat_work;
+	struct delayed_work fg_reinit_work;
+	struct work_struct fg_work;
+	struct work_struct fg_acc_cur_work;
+	struct delayed_work fg_check_hw_failure_work;
+	struct mutex cc_lock;
+	struct kobject fg_kobject;
+};
+static LIST_HEAD(ab8500_fg_list);
+
+/**
+ * ab8500_fg_get() - returns a reference to the primary AB8500 fuel gauge
+ * (i.e. the first fuel gauge in the instance list)
+ */
+struct ab8500_fg *ab8500_fg_get(void)
+{
+	struct ab8500_fg *fg;
+
+	if (list_empty(&ab8500_fg_list))
+		return NULL;
+
+	fg = list_first_entry(&ab8500_fg_list, struct ab8500_fg, node);
+	return fg;
+}
+
+/* Main battery properties */
+static enum power_supply_property ab8500_fg_props[] = {
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+	POWER_SUPPLY_PROP_ENERGY_FULL,
+	POWER_SUPPLY_PROP_ENERGY_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+};
+
+/*
+ * This array maps the raw hex value to lowbat voltage used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_fg_lowbat_voltage_map[] = {
+	2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
+	2500, 2525, 2550, 2575, 2600, 2625, 2650, 2675,
+	2700, 2725, 2750, 2775, 2800, 2825, 2850, 2875,
+	2900, 2925, 2950, 2975, 3000, 3025, 3050, 3075,
+	3100, 3125, 3150, 3175, 3200, 3225, 3250, 3275,
+	3300, 3325, 3350, 3375, 3400, 3425, 3450, 3475,
+	3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
+	3700, 3725, 3750, 3775, 3800, 3825, 3850, 3850,
+};
+
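+/**
+ * ab8500_volt_to_regval() - map a low battery voltage to a register value
+ * @voltage:	voltage threshold in mV
+ *
+ * Returns the index of the highest entry in ab8500_fg_lowbat_voltage_map
+ * that does not exceed @voltage, e.g. 2360 mV maps to 2 (2350 mV).
+ * Voltages below the first entry return 0 and voltages at or above the
+ * last entry return the last index.
+ */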
+static u8 ab8500_volt_to_regval(int voltage)
+{
+	int i;
+
+	if (voltage < ab8500_fg_lowbat_voltage_map[0])
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(ab8500_fg_lowbat_voltage_map); i++) {
+		if (voltage < ab8500_fg_lowbat_voltage_map[i])
+			return (u8) i - 1;
+	}
+
+	/* If not captured above, return index of last element */
+	return (u8) ARRAY_SIZE(ab8500_fg_lowbat_voltage_map) - 1;
+}
+
+/**
+ * ab8500_fg_is_low_curr() - Low or high current mode
+ * @di:		pointer to the ab8500_fg structure
+ * @curr:	the current to base our decision on
+ *
+ * Low current mode if the current consumption is below a certain threshold
+ */
+static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
+{
+	/*
+	 * We want to know if we're in low current mode
+	 */
+	if (curr > -di->bat->fg_params->high_curr_threshold)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * ab8500_fg_add_cap_sample() - Add capacity to average filter
+ * @di:		pointer to the ab8500_fg structure
+ * @sample:	the capacity in mAh to add to the filter
+ *
+ * A capacity is added to the filter and a new mean capacity is calculated and
+ * returned
+ */
+static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
+{
+	struct timespec ts;
+	struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+	getnstimeofday(&ts);
+
+	do {
+		avg->sum += sample - avg->samples[avg->pos];
+		avg->samples[avg->pos] = sample;
+		avg->time_stamps[avg->pos] = ts.tv_sec;
+		avg->pos++;
+
+		if (avg->pos == NBR_AVG_SAMPLES)
+			avg->pos = 0;
+
+		if (avg->nbr_samples < NBR_AVG_SAMPLES)
+			avg->nbr_samples++;
+
+		/*
+		 * Check the time stamp for each sample. If too old,
+		 * replace with latest sample
+		 */
+	} while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+
+	avg->avg = avg->sum / avg->nbr_samples;
+
+	return avg->avg;
+}
+
+/**
+ * ab8500_fg_clear_cap_samples() - Clear average filter
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * The capacity filter is reset to zero.
+ */
+static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
+{
+	int i;
+	struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+	avg->pos = 0;
+	avg->nbr_samples = 0;
+	avg->sum = 0;
+	avg->avg = 0;
+
+	for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+		avg->samples[i] = 0;
+		avg->time_stamps[i] = 0;
+	}
+}
+
+/**
+ * ab8500_fg_fill_cap_sample() - Fill average filter
+ * @di:		pointer to the ab8500_fg structure
+ * @sample:	the capacity in mAh to fill the filter with
+ *
+ * The capacity filter is filled with a capacity in mAh
+ */
+static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
+{
+	int i;
+	struct timespec ts;
+	struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+	getnstimeofday(&ts);
+
+	for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+		avg->samples[i] = sample;
+		avg->time_stamps[i] = ts.tv_sec;
+	}
+
+	avg->pos = 0;
+	avg->nbr_samples = NBR_AVG_SAMPLES;
+	avg->sum = sample * NBR_AVG_SAMPLES;
+	avg->avg = sample;
+}
+
+/**
+ * ab8500_fg_coulomb_counter() - enable coulomb counter
+ * @di:		pointer to the ab8500_fg structure
+ * @enable:	enable/disable
+ *
+ * Enable/Disable coulomb counter.
+ * On failure returns negative value.
+ */
+static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
+{
+	int ret = 0;
+	mutex_lock(&di->cc_lock);
+	if (enable) {
+		/* To be able to reprogram the number of samples, we have to
+		 * first stop the CC and then enable it again */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8500_RTC_CC_CONF_REG, 0x00);
+		if (ret)
+			goto cc_err;
+
+		/* Program the samples */
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+			di->fg_samples);
+		if (ret)
+			goto cc_err;
+
+		/* Start the CC */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8500_RTC_CC_CONF_REG,
+			(CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+		if (ret)
+			goto cc_err;
+
+		di->flags.fg_enabled = true;
+	} else {
+		/* Clear any pending read requests */
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+		if (ret)
+			goto cc_err;
+
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU_CTRL, 0);
+		if (ret)
+			goto cc_err;
+
+		/* Stop the CC */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8500_RTC_CC_CONF_REG, 0);
+		if (ret)
+			goto cc_err;
+
+		di->flags.fg_enabled = false;
+
+	}
+	dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
+		enable, di->fg_samples);
+
+	mutex_unlock(&di->cc_lock);
+
+	return ret;
+cc_err:
+	dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
+	mutex_unlock(&di->cc_lock);
+	return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_start() - start battery instantaneous current measurement
+ * @di:         pointer to the ab8500_fg structure
+ *
+ * Returns 0 or error code
+ * Note: This is part "one" and has to be called before
+ * ab8500_fg_inst_curr_finalize()
+ */
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+{
+	u8 reg_val;
+	int ret;
+
+	mutex_lock(&di->cc_lock);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+		AB8500_RTC_CC_CONF_REG, &reg_val);
+	if (ret < 0)
+		goto fail;
+
+	if (!(reg_val & CC_PWR_UP_ENA)) {
+		dev_dbg(di->dev, "%s Enable FG\n", __func__);
+		di->turn_off_fg = true;
+
+		/* Program the samples */
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+			SEC_TO_SAMPLE(10));
+		if (ret)
+			goto fail;
+
+		/* Start the CC */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8500_RTC_CC_CONF_REG,
+			(CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+		if (ret)
+			goto fail;
+	} else {
+		di->turn_off_fg = false;
+	}
+
+	/* Return and WFI */
+	INIT_COMPLETION(di->ab8500_fg_complete);
+	enable_irq(di->irq);
+
+	/* Note: cc_lock is still locked */
+	return 0;
+fail:
+	mutex_unlock(&di->cc_lock);
+	return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_done() - check if fg conversion is done
+ * @di:         pointer to the ab8500_fg structure
+ *
+ * Returns 1 if conversion done, 0 if still waiting
+ */
+int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
+{
+	return completion_done(&di->ab8500_fg_complete);
+}
+
+/**
+ * ab8500_fg_inst_curr_finalize() - battery instantaneous current
+ * @di:         pointer to the ab8500_fg structure
+ * @res:	battery instantaneous current (on success)
+ *
+ * Returns 0 or an error code
+ * Note: This is part "two" and has to be called at earliest 250 ms
+ * after ab8500_fg_inst_curr_start()
+ */
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
+{
+	u8 low, high;
+	int val;
+	int ret;
+	int timeout;
+
+	if (!completion_done(&di->ab8500_fg_complete)) {
+		timeout = wait_for_completion_timeout(&di->ab8500_fg_complete,
+			INS_CURR_TIMEOUT);
+		dev_dbg(di->dev, "Finalize time: %d ms\n",
+			((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
+		if (!timeout) {
+			ret = -ETIME;
+			disable_irq(di->irq);
+			dev_err(di->dev, "completion timed out [%d]\n",
+				__LINE__);
+			goto fail;
+		}
+	}
+
+	disable_irq(di->irq);
+
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+			READ_REQ, READ_REQ);
+	if (ret)
+		goto fail;
+
+	/* 100uS between read request and read is needed */
+	usleep_range(100, 100);
+
+	/* Read CC Sample conversion value Low and high */
+	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+		AB8500_GASG_CC_SMPL_CNVL_REG,  &low);
+	if (ret < 0)
+		goto fail;
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+		AB8500_GASG_CC_SMPL_CNVH_REG,  &high);
+	if (ret < 0)
+		goto fail;
+
+	/*
+	 * Negative value for discharging.
+	 * Convert the 13-bit two's-complement sample into a 32-bit int:
+	 * bit 4 of the high byte is the sign bit and 0xFFFFE000 does the
+	 * sign extension.
+	 */
+	if (high & 0x10)
+		val = (low | (high << 8) | 0xFFFFE000);
+	else
+		val = (low | (high << 8));
+
+	/*
+	 * Convert to unit value in mA
+	 * Full scale input voltage is
+	 * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
+	 * Given a 250ms conversion cycle time the LSB corresponds
+	 * to 112.9 nAh. Convert to current by dividing by the conversion
+	 * time in hours (250ms = 1 / (3600 * 4)h)
+	 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+	 */
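+	/*
+	 * Worked example, assuming fg_res = 100 (i.e. 10 mOhm):
+	 * 1129 * 36 * 4 / (1000 * 100) ~= 1.63, so each LSB of val
+	 * contributes roughly 1.63 mA, matching the 1.627 mA LSB above.
+	 */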
+	val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
+		(1000 * di->bat->fg_res);
+
+	if (di->turn_off_fg) {
+		dev_dbg(di->dev, "%s Disable FG\n", __func__);
+
+		/* Clear any pending read requests */
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+		if (ret)
+			goto fail;
+
+		/* Stop the CC */
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8500_RTC_CC_CONF_REG, 0);
+		if (ret)
+			goto fail;
+	}
+	mutex_unlock(&di->cc_lock);
+	(*res) = val;
+
+	return 0;
+fail:
+	mutex_unlock(&di->cc_lock);
+	return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_blocking() - battery instantaneous current
+ * @di:         pointer to the ab8500_fg structure
+ *
+ * Returns the battery instantaneous current in mA, or 0 on failure
+ */
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
+{
+	int ret;
+	int res = 0;
+
+	ret = ab8500_fg_inst_curr_start(di);
+	if (ret) {
+		dev_err(di->dev, "Failed to initialize fg_inst\n");
+		return 0;
+	}
+
+	ret = ab8500_fg_inst_curr_finalize(di, &res);
+	if (ret) {
+		dev_err(di->dev, "Failed to finalize fg_inst\n");
+		return 0;
+	}
+
+	return res;
+}
+
+/**
+ * ab8500_fg_acc_cur_work() - average battery current
+ * @work:	pointer to the work_struct structure
+ *
+ * Updates the average battery current obtained from the
+ * coulomb counter.
+ */
+static void ab8500_fg_acc_cur_work(struct work_struct *work)
+{
+	int val;
+	int ret;
+	u8 low, med, high;
+
+	struct ab8500_fg *di = container_of(work,
+		struct ab8500_fg, fg_acc_cur_work);
+
+	mutex_lock(&di->cc_lock);
+	ret = abx500_set_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+		AB8500_GASG_CC_NCOV_ACCU_CTRL, RD_NCONV_ACCU_REQ);
+	if (ret)
+		goto exit;
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+		AB8500_GASG_CC_NCOV_ACCU_LOW,  &low);
+	if (ret < 0)
+		goto exit;
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+		AB8500_GASG_CC_NCOV_ACCU_MED,  &med);
+	if (ret < 0)
+		goto exit;
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+		AB8500_GASG_CC_NCOV_ACCU_HIGH, &high);
+	if (ret < 0)
+		goto exit;
+
+	/*
+	 * Check for sign bit in case of negative value (2's complement):
+	 * bit 4 of the high byte is the sign bit of the 21-bit accumulator
+	 * value and 0xFFE00000 sign-extends it to a 32-bit int.
+	 */
+	if (high & 0x10)
+		val = (low | (med << 8) | (high << 16) | 0xFFE00000);
+	else
+		val = (low | (med << 8) | (high << 16));
+
+	/*
+	 * Convert to uAh
+	 * Given a 250ms conversion cycle time the LSB corresponds
+	 * to 112.9 nAh.
+	 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+	 */
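+	/* With fg_res = 100 (10 mOhm) this is ~0.113 uAh per LSB */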
+	di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) /
+		(100 * di->bat->fg_res);
+
+	/*
+	 * Convert to unit value in mA
+	 * Full scale input voltage is
+	 * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
+	 * Given a 250ms conversion cycle time the LSB corresponds
+	 * to 112.9 nAh. Convert to current by dividing by the conversion
+	 * time in hours (= samples / (3600 * 4)h)
+	 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+	 */
+	di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
+		(1000 * di->bat->fg_res * (di->fg_samples / 4));
+
+	di->flags.conv_done = true;
+
+	mutex_unlock(&di->cc_lock);
+
+	queue_work(di->fg_wq, &di->fg_work);
+
+	return;
+exit:
+	dev_err(di->dev,
+		"Failed to read or write gas gauge registers\n");
+	mutex_unlock(&di->cc_lock);
+	queue_work(di->fg_wq, &di->fg_work);
+}
+
+/**
+ * ab8500_fg_bat_voltage() - get battery voltage
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Returns the battery voltage in mV, or the previous reading if the
+ * GPADC conversion fails
+ */
+static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
+{
+	int vbat;
+	static int prev;
+
+	vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+	if (vbat < 0) {
+		dev_err(di->dev,
+			"%s gpadc conversion failed, using previous value\n",
+			__func__);
+		return prev;
+	}
+
+	prev = vbat;
+	return vbat;
+}
+
+/**
+ * ab8500_fg_volt_to_capacity() - Voltage based capacity
+ * @di:		pointer to the ab8500_fg structure
+ * @voltage:	The voltage to convert to a capacity
+ *
+ * Returns battery capacity in per mille based on voltage
+ */
+static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
+{
+	int i, tbl_size;
+	struct abx500_v_to_cap *tbl;
+	int cap = 0;
+
+	tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+	tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+
+	for (i = 0; i < tbl_size; ++i) {
+		if (voltage > tbl[i].voltage)
+			break;
+	}
+
+	if ((i > 0) && (i < tbl_size)) {
+		cap = interpolate(voltage,
+			tbl[i].voltage,
+			tbl[i].capacity * 10,
+			tbl[i-1].voltage,
+			tbl[i-1].capacity * 10);
+	} else if (i == 0) {
+		cap = 1000;
+	} else {
+		cap = 0;
+	}
+
+	dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille",
+		__func__, voltage, cap);
+
+	return cap;
+}
+
+/**
+ * ab8500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is not compensated
+ * for the voltage drop due to the load
+ */
+static int ab8500_fg_uncomp_volt_to_capacity(struct ab8500_fg *di)
+{
+	di->vbat = ab8500_fg_bat_voltage(di);
+	return ab8500_fg_volt_to_capacity(di, di->vbat);
+}
+
+/**
+ * ab8500_fg_battery_resistance() - Returns the battery inner resistance
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Returns the battery's internal resistance plus the fuel gauge resistor
+ * value, i.e. the total resistance of the path from gnd to the bat+ node.
+ */
+static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
+{
+	int i, tbl_size;
+	struct batres_vs_temp *tbl;
+	int resist = 0;
+
+	tbl = di->bat->bat_type[di->bat->batt_id].batres_tbl;
+	tbl_size = di->bat->bat_type[di->bat->batt_id].n_batres_tbl_elements;
+
+	for (i = 0; i < tbl_size; ++i) {
+		if (di->bat_temp / 10 > tbl[i].temp)
+			break;
+	}
+
+	if ((i > 0) && (i < tbl_size)) {
+		resist = interpolate(di->bat_temp / 10,
+			tbl[i].temp,
+			tbl[i].resist,
+			tbl[i-1].temp,
+			tbl[i-1].resist);
+	} else if (i == 0) {
+		resist = tbl[0].resist;
+	} else {
+		resist = tbl[tbl_size - 1].resist;
+	}
+
+	dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
+	    " fg resistance %d, total: %d (mOhm)\n",
+		__func__, di->bat_temp, resist, di->bat->fg_res / 10,
+		(di->bat->fg_res / 10) + resist);
+
+	/* fg_res variable is in 0.1mOhm */
+	resist += di->bat->fg_res / 10;
+
+	return resist;
+}
+
+/**
+ * ab8500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is load compensated
+ * for the voltage drop
+ */
+static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
+{
+	int vbat_comp, res;
+	int i = 0;
+	int vbat = 0;
+
+	ab8500_fg_inst_curr_start(di);
+
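+	/*
+	 * Sample Vbat while waiting for the instant current conversion to
+	 * finish; the samples are averaged below.
+	 */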
+	do {
+		vbat += ab8500_fg_bat_voltage(di);
+		i++;
+		msleep(5);
+	} while (!ab8500_fg_inst_curr_done(di));
+
+	ab8500_fg_inst_curr_finalize(di, &di->inst_curr);
+
+	di->vbat = vbat / i;
+	res = ab8500_fg_battery_resistance(di);
+
+	/* Use Ohm's law to get the load compensated voltage */
+	vbat_comp = di->vbat - (di->inst_curr * res) / 1000;
+
+	dev_dbg(di->dev, "%s Measured Vbat: %dmV, Compensated Vbat %dmV, "
+		"R: %dmOhm, Current: %dmA Vbat Samples: %d\n",
+		__func__, di->vbat, vbat_comp, res, di->inst_curr, i);
+
+	return ab8500_fg_volt_to_capacity(di, vbat_comp);
+}
+
+/**
+ * ab8500_fg_convert_mah_to_permille() - Capacity in mAh to permille
+ * @di:		pointer to the ab8500_fg structure
+ * @cap_mah:	capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in permille
+ */
+static int ab8500_fg_convert_mah_to_permille(struct ab8500_fg *di, int cap_mah)
+{
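+	/* e.g. 900 mAh of a 1800 mAh design capacity gives 500 per mille */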
+	return (cap_mah * 1000) / di->bat_cap.max_mah_design;
+}
+
+/**
+ * ab8500_fg_convert_permille_to_mah() - Capacity in permille to mAh
+ * @di:		pointer to the ab8500_fg structure
+ * @cap_pm:	capacity in permille
+ *
+ * Converts capacity in permille to capacity in mAh
+ */
+static int ab8500_fg_convert_permille_to_mah(struct ab8500_fg *di, int cap_pm)
+{
+	return cap_pm * di->bat_cap.max_mah_design / 1000;
+}
+
+/**
+ * ab8500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
+ * @di:		pointer to the ab8500_fg structure
+ * @cap_mah:	capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in uWh
+ */
+static int ab8500_fg_convert_mah_to_uwh(struct ab8500_fg *di, int cap_mah)
+{
+	u64 div_res;
+	u32 div_rem;
+
+	div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
+	div_rem = do_div(div_res, 1000);
+
+	/* Make sure to round upwards if necessary */
+	if (div_rem >= 1000 / 2)
+		div_res++;
+
+	return (int) div_res;
+}
+
+/**
+ * ab8500_fg_calc_cap_charging() - Calculate remaining capacity while charging
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and
+ * the FG accumulator register value. The filter is filled with this capacity.
+ */
+static int ab8500_fg_calc_cap_charging(struct ab8500_fg *di)
+{
+	dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+		__func__,
+		di->bat_cap.mah,
+		di->accu_charge);
+
+	/* Capacity should not be less than 0 */
+	if (di->bat_cap.mah + di->accu_charge > 0)
+		di->bat_cap.mah += di->accu_charge;
+	else
+		di->bat_cap.mah = 0;
+	/*
+	 * We force capacity to 100% once when the algorithm
+	 * reports that it's full.
+	 */
+	if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
+		di->flags.force_full) {
+		di->bat_cap.mah = di->bat_cap.max_mah_design;
+	}
+
+	ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+	di->bat_cap.permille =
+		ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+	/* We need to update battery voltage and inst current when charging */
+	di->vbat = ab8500_fg_bat_voltage(di);
+	di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+	return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
+ * @di:		pointer to the ab8500_fg structure
+ * @comp:	if voltage should be load compensated before capacity calc
+ *
+ * Return the capacity in mAh based on the battery voltage. The voltage can
+ * either be load compensated or not. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_voltage(struct ab8500_fg *di, bool comp)
+{
+	int permille, mah;
+
+	if (comp)
+		permille = ab8500_fg_load_comp_volt_to_capacity(di);
+	else
+		permille = ab8500_fg_uncomp_volt_to_capacity(di);
+
+	mah = ab8500_fg_convert_permille_to_mah(di, permille);
+
+	di->bat_cap.mah = ab8500_fg_add_cap_sample(di, mah);
+	di->bat_cap.permille =
+		ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+	return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and
+ * the FG accumulator register value. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_fg(struct ab8500_fg *di)
+{
+	int permille_volt, permille;
+
+	dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+		__func__,
+		di->bat_cap.mah,
+		di->accu_charge);
+
+	/* Capacity should not be less than 0 */
+	if (di->bat_cap.mah + di->accu_charge > 0)
+		di->bat_cap.mah += di->accu_charge;
+	else
+		di->bat_cap.mah = 0;
+
+	if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
+		di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+	/*
+	 * Check against voltage based capacity. It can not be lower
+	 * than what the uncompensated voltage says
+	 */
+	permille = ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+	permille_volt = ab8500_fg_uncomp_volt_to_capacity(di);
+
+	if (permille < permille_volt) {
+		di->bat_cap.permille = permille_volt;
+		di->bat_cap.mah = ab8500_fg_convert_permille_to_mah(di,
+			di->bat_cap.permille);
+
+		dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
+			__func__,
+			permille,
+			permille_volt);
+
+		ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+	} else {
+		ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+		di->bat_cap.permille =
+			ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+	}
+
+	return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_capacity_level() - Get the battery capacity level
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Get the battery capacity level based on the capacity in percent
+ */
+static int ab8500_fg_capacity_level(struct ab8500_fg *di)
+{
+	int ret, percent;
+
+	percent = di->bat_cap.permille / 10;
+
+	if (percent <= di->bat->cap_levels->critical ||
+		di->flags.low_bat)
+		ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+	else if (percent <= di->bat->cap_levels->low)
+		ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+	else if (percent <= di->bat->cap_levels->normal)
+		ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+	else if (percent <= di->bat->cap_levels->high)
+		ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+	else
+		ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+	return ret;
+}
+
+/**
+ * ab8500_fg_check_capacity_limits() - Check if capacity has changed
+ * @di:		pointer to the ab8500_fg structure
+ * @init:	capacity is allowed to go up in init mode
+ *
+ * Check if capacity or capacity limit has changed and notify the system
+ * about it using the power_supply framework
+ */
+static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
+{
+	bool changed = false;
+
+	di->bat_cap.level = ab8500_fg_capacity_level(di);
+
+	if (di->bat_cap.level != di->bat_cap.prev_level) {
+		/*
+		 * We do not allow reported capacity level to go up
+		 * unless we're charging or if we're in init
+		 */
+		if (!(!di->flags.charging && di->bat_cap.level >
+			di->bat_cap.prev_level) || init) {
+			dev_dbg(di->dev, "level changed from %d to %d\n",
+				di->bat_cap.prev_level,
+				di->bat_cap.level);
+			di->bat_cap.prev_level = di->bat_cap.level;
+			changed = true;
+		} else {
+			dev_dbg(di->dev, "level not allowed to go up "
+				"since no charger is connected: %d to %d\n",
+				di->bat_cap.prev_level,
+				di->bat_cap.level);
+		}
+	}
+
+	/*
+	 * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
+	 * shutdown
+	 */
+	if (di->flags.low_bat) {
+		dev_dbg(di->dev, "Battery low, set capacity to 0\n");
+		di->bat_cap.prev_percent = 0;
+		di->bat_cap.permille = 0;
+		di->bat_cap.prev_mah = 0;
+		di->bat_cap.mah = 0;
+		changed = true;
+	} else if (di->flags.fully_charged) {
+		/*
+		 * We report 100% if algorithm reported fully charged
+		 * unless capacity drops too much
+		 */
+		if (di->flags.force_full) {
+			di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+			di->bat_cap.prev_mah = di->bat_cap.mah;
+		} else if (!di->flags.force_full &&
+			di->bat_cap.prev_percent !=
+			(di->bat_cap.permille) / 10 &&
+			(di->bat_cap.permille / 10) <
+			di->bat->fg_params->maint_thres) {
+			dev_dbg(di->dev,
+				"battery reported full "
+				"but capacity dropping: %d\n",
+				di->bat_cap.permille / 10);
+			di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+			di->bat_cap.prev_mah = di->bat_cap.mah;
+
+			changed = true;
+		}
+	} else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
+		if (di->bat_cap.permille / 10 == 0) {
+			/*
+			 * We will not report 0% unless we've got
+			 * the LOW_BAT IRQ, no matter what the FG
+			 * algorithm says.
+			 */
+			di->bat_cap.prev_percent = 1;
+			di->bat_cap.permille = 1;
+			di->bat_cap.prev_mah = 1;
+			di->bat_cap.mah = 1;
+
+			changed = true;
+		} else if (!(!di->flags.charging &&
+			(di->bat_cap.permille / 10) >
+			di->bat_cap.prev_percent) || init) {
+			/*
+			 * We do not allow reported capacity to go up
+			 * unless we're charging or if we're in init
+			 */
+			dev_dbg(di->dev,
+				"capacity changed from %d to %d (%d)\n",
+				di->bat_cap.prev_percent,
+				di->bat_cap.permille / 10,
+				di->bat_cap.permille);
+			di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+			di->bat_cap.prev_mah = di->bat_cap.mah;
+
+			changed = true;
+		} else {
+			dev_dbg(di->dev, "capacity not allowed to go up since "
+				"no charger is connected: %d to %d (%d)\n",
+				di->bat_cap.prev_percent,
+				di->bat_cap.permille / 10,
+				di->bat_cap.permille);
+		}
+	}
+
+	if (changed) {
+		power_supply_changed(&di->fg_psy);
+		if (di->flags.fully_charged && di->flags.force_full) {
+			dev_dbg(di->dev, "Battery full, notifying.\n");
+			di->flags.force_full = false;
+			sysfs_notify(&di->fg_kobject, NULL, "charge_full");
+		}
+		sysfs_notify(&di->fg_kobject, NULL, "charge_now");
+	}
+}
+
+static void ab8500_fg_charge_state_to(struct ab8500_fg *di,
+	enum ab8500_fg_charge_state new_state)
+{
+	dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
+		di->charge_state,
+		charge_state[di->charge_state],
+		new_state,
+		charge_state[new_state]);
+
+	di->charge_state = new_state;
+}
+
+static void ab8500_fg_discharge_state_to(struct ab8500_fg *di,
+	enum ab8500_fg_discharge_state new_state)
+{
+	dev_dbg(di->dev, "Discharge state from %d [%s] to %d [%s]\n",
+		di->discharge_state,
+		discharge_state[di->discharge_state],
+		new_state,
+		discharge_state[new_state]);
+
+	di->discharge_state = new_state;
+}
+
+/**
+ * ab8500_fg_algorithm_charging() - FG algorithm for when charging
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're charging
+ */
+static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
+{
+	/*
+	 * If we change to discharge mode
+	 * we should start with recovery
+	 */
+	if (di->discharge_state != AB8500_FG_DISCHARGE_INIT_RECOVERY)
+		ab8500_fg_discharge_state_to(di,
+			AB8500_FG_DISCHARGE_INIT_RECOVERY);
+
+	switch (di->charge_state) {
+	case AB8500_FG_CHARGE_INIT:
+		di->fg_samples = SEC_TO_SAMPLE(
+			di->bat->fg_params->accu_charging);
+
+		ab8500_fg_coulomb_counter(di, true);
+		ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);
+
+		break;
+
+	case AB8500_FG_CHARGE_READOUT:
+		/*
+		 * Read the FG and calculate the new capacity
+		 */
+		mutex_lock(&di->cc_lock);
+		if (!di->flags.conv_done) {
+			/* Wasn't the CC IRQ that got us here */
+			mutex_unlock(&di->cc_lock);
+			dev_dbg(di->dev, "%s CC conv not done\n",
+				__func__);
+
+			break;
+		}
+		di->flags.conv_done = false;
+		mutex_unlock(&di->cc_lock);
+
+		ab8500_fg_calc_cap_charging(di);
+
+		break;
+
+	default:
+		break;
+	}
+
+	/* Check capacity limits */
+	ab8500_fg_check_capacity_limits(di, false);
+}
+
+static void force_capacity(struct ab8500_fg *di)
+{
+	int cap;
+
+	ab8500_fg_clear_cap_samples(di);
+	cap = di->bat_cap.user_mah;
+	if (cap > di->bat_cap.max_mah_design) {
+		dev_dbg(di->dev, "Remaining cap %d can't be bigger than total"
+			" %d\n", cap, di->bat_cap.max_mah_design);
+		cap = di->bat_cap.max_mah_design;
+	}
+	ab8500_fg_fill_cap_sample(di, di->bat_cap.user_mah);
+	di->bat_cap.permille = ab8500_fg_convert_mah_to_permille(di, cap);
+	di->bat_cap.mah = cap;
+	ab8500_fg_check_capacity_limits(di, true);
+}
+
+static bool check_sysfs_capacity(struct ab8500_fg *di)
+{
+	int cap, lower, upper;
+	int cap_permille;
+
+	cap = di->bat_cap.user_mah;
+
+	cap_permille = ab8500_fg_convert_mah_to_permille(di,
+		di->bat_cap.user_mah);
+
+	lower = di->bat_cap.permille - di->bat->fg_params->user_cap_limit * 10;
+	upper = di->bat_cap.permille + di->bat->fg_params->user_cap_limit * 10;
+
+	if (lower < 0)
+		lower = 0;
+	/* 1000 permille corresponds to 100 percent */
+	if (upper > 1000)
+		upper = 1000;
+
+	dev_dbg(di->dev, "Capacity limits:"
+		" (Lower: %d User: %d Upper: %d) [user: %d, was: %d]\n",
+		lower, cap_permille, upper, cap, di->bat_cap.mah);
+
+	/* If within limits, use the user-supplied capacity and exit estimation */
+	if (cap_permille > lower && cap_permille < upper) {
+		dev_dbg(di->dev, "OK! Using user's cap %d uAh now\n", cap);
+		force_capacity(di);
+		return true;
+	}
+	dev_dbg(di->dev, "Capacity from user out of limits, ignoring\n");
+	return false;
+}
+
+/**
+ * ab8500_fg_algorithm_discharging() - FG algorithm for when discharging
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're discharging
+ */
+static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
+{
+	int sleep_time;
+
+	/* If we change to charge mode we should start with init */
+	if (di->charge_state != AB8500_FG_CHARGE_INIT)
+		ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+
+	switch (di->discharge_state) {
+	case AB8500_FG_DISCHARGE_INIT:
+		/* We use the FG IRQ to work on */
+		di->init_cnt = 0;
+		di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+		ab8500_fg_coulomb_counter(di, true);
+		ab8500_fg_discharge_state_to(di,
+			AB8500_FG_DISCHARGE_INITMEASURING);
+
+		/* Intentional fallthrough */
+	case AB8500_FG_DISCHARGE_INITMEASURING:
+		/*
+		 * Discard a number of samples during startup.
+		 * After that, use compensated voltage for a few
+		 * samples to get an initial capacity.
+		 * Then go to READOUT
+		 */
+		sleep_time = di->bat->fg_params->init_timer;
+
+		/* Discard the first [x] seconds */
+		if (di->init_cnt >
+			di->bat->fg_params->init_discard_time) {
+			ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+			ab8500_fg_check_capacity_limits(di, true);
+		}
+
+		di->init_cnt += sleep_time;
+		if (di->init_cnt > di->bat->fg_params->init_total_time)
+			ab8500_fg_discharge_state_to(di,
+				AB8500_FG_DISCHARGE_READOUT_INIT);
+
+		break;
+
+	case AB8500_FG_DISCHARGE_INIT_RECOVERY:
+		di->recovery_cnt = 0;
+		di->recovery_needed = true;
+		ab8500_fg_discharge_state_to(di,
+			AB8500_FG_DISCHARGE_RECOVERY);
+
+		/* Intentional fallthrough */
+
+	case AB8500_FG_DISCHARGE_RECOVERY:
+		sleep_time = di->bat->fg_params->recovery_sleep_timer;
+
+		/*
+		 * We should check the power consumption
+		 * If low, go to READOUT (after x min) or
+		 * RECOVERY_SLEEP if time left.
+		 * If high, go to READOUT
+		 */
+		di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+		if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+			if (di->recovery_cnt >
+				di->bat->fg_params->recovery_total_time) {
+				di->fg_samples = SEC_TO_SAMPLE(
+					di->bat->fg_params->accu_high_curr);
+				ab8500_fg_coulomb_counter(di, true);
+				ab8500_fg_discharge_state_to(di,
+					AB8500_FG_DISCHARGE_READOUT);
+				di->recovery_needed = false;
+			} else {
+				queue_delayed_work(di->fg_wq,
+					&di->fg_periodic_work,
+					sleep_time * HZ);
+			}
+			di->recovery_cnt += sleep_time;
+		} else {
+			di->fg_samples = SEC_TO_SAMPLE(
+				di->bat->fg_params->accu_high_curr);
+			ab8500_fg_coulomb_counter(di, true);
+			ab8500_fg_discharge_state_to(di,
+				AB8500_FG_DISCHARGE_READOUT);
+		}
+		break;
+
+	case AB8500_FG_DISCHARGE_READOUT_INIT:
+		di->fg_samples = SEC_TO_SAMPLE(
+			di->bat->fg_params->accu_high_curr);
+		ab8500_fg_coulomb_counter(di, true);
+		ab8500_fg_discharge_state_to(di,
+				AB8500_FG_DISCHARGE_READOUT);
+		break;
+
+	case AB8500_FG_DISCHARGE_READOUT:
+		di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+		if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+			/* Detect mode change */
+			if (di->high_curr_mode) {
+				di->high_curr_mode = false;
+				di->high_curr_cnt = 0;
+			}
+
+			if (di->recovery_needed) {
+				ab8500_fg_discharge_state_to(di,
+					AB8500_FG_DISCHARGE_RECOVERY);
+
+				queue_delayed_work(di->fg_wq,
+					&di->fg_periodic_work, 0);
+
+				break;
+			}
+
+			ab8500_fg_calc_cap_discharge_voltage(di, true);
+		} else {
+			mutex_lock(&di->cc_lock);
+			if (!di->flags.conv_done) {
+				/* Wasn't the CC IRQ that got us here */
+				mutex_unlock(&di->cc_lock);
+				dev_dbg(di->dev, "%s CC conv not done\n",
+					__func__);
+
+				break;
+			}
+			di->flags.conv_done = false;
+			mutex_unlock(&di->cc_lock);
+
+			/* Detect mode change */
+			if (!di->high_curr_mode) {
+				di->high_curr_mode = true;
+				di->high_curr_cnt = 0;
+			}
+
+			di->high_curr_cnt +=
+				di->bat->fg_params->accu_high_curr;
+			if (di->high_curr_cnt >
+				di->bat->fg_params->high_curr_time)
+				di->recovery_needed = true;
+
+			ab8500_fg_calc_cap_discharge_fg(di);
+		}
+
+		ab8500_fg_check_capacity_limits(di, false);
+
+		break;
+
+	case AB8500_FG_DISCHARGE_WAKEUP:
+		ab8500_fg_coulomb_counter(di, true);
+		di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+		ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+		di->fg_samples = SEC_TO_SAMPLE(
+			di->bat->fg_params->accu_high_curr);
+		ab8500_fg_coulomb_counter(di, true);
+		ab8500_fg_discharge_state_to(di,
+				AB8500_FG_DISCHARGE_READOUT);
+
+		ab8500_fg_check_capacity_limits(di, false);
+
+		break;
+
+	default:
+		break;
+	}
+}
+
+/**
+ * ab8500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
+ * @di:		pointer to the ab8500_fg structure
+ *
+ */
+static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
+{
+	int ret;
+
+	switch (di->calib_state) {
+	case AB8500_FG_CALIB_INIT:
+		dev_dbg(di->dev, "Calibration ongoing...\n");
+
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+			CC_INT_CAL_N_AVG_MASK, CC_INT_CAL_SAMPLES_8);
+		if (ret < 0)
+			goto err;
+
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+			CC_INTAVGOFFSET_ENA, CC_INTAVGOFFSET_ENA);
+		if (ret < 0)
+			goto err;
+		di->calib_state = AB8500_FG_CALIB_WAIT;
+		break;
+	case AB8500_FG_CALIB_END:
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+			CC_MUXOFFSET, CC_MUXOFFSET);
+		if (ret < 0)
+			goto err;
+		di->flags.calibrate = false;
+		dev_dbg(di->dev, "Calibration done...\n");
+		queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+		break;
+	case AB8500_FG_CALIB_WAIT:
+		dev_dbg(di->dev, "Calibration WFI\n");
+	default:
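+		/* Intentional fallthrough */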
+		break;
+	}
+	return;
+err:
+	/* Something went wrong, don't calibrate then */
+	dev_err(di->dev, "failed to calibrate the CC\n");
+	di->flags.calibrate = false;
+	di->calib_state = AB8500_FG_CALIB_INIT;
+	queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+}
+
+/**
+ * ab8500_fg_algorithm() - Entry point for the FG algorithm
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Entry point for the battery capacity calculation state machine
+ */
+static void ab8500_fg_algorithm(struct ab8500_fg *di)
+{
+	if (di->flags.calibrate)
+		ab8500_fg_algorithm_calibrate(di);
+	else {
+		if (di->flags.charging)
+			ab8500_fg_algorithm_charging(di);
+		else
+			ab8500_fg_algorithm_discharging(di);
+	}
+
+	dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+		"%d %d %d %d %d %d %d\n",
+		di->bat_cap.max_mah_design,
+		di->bat_cap.mah,
+		di->bat_cap.permille,
+		di->bat_cap.level,
+		di->bat_cap.prev_mah,
+		di->bat_cap.prev_percent,
+		di->bat_cap.prev_level,
+		di->vbat,
+		di->inst_curr,
+		di->avg_curr,
+		di->accu_charge,
+		di->flags.charging,
+		di->charge_state,
+		di->discharge_state,
+		di->high_curr_mode,
+		di->recovery_needed);
+}
+
+/**
+ * ab8500_fg_periodic_work() - Run the FG state machine periodically
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for periodic work
+ */
+static void ab8500_fg_periodic_work(struct work_struct *work)
+{
+	struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+		fg_periodic_work.work);
+
+	if (di->init_capacity) {
+		/* A dummy read that will return 0 */
+		di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+		/* Get an initial capacity calculation */
+		ab8500_fg_calc_cap_discharge_voltage(di, true);
+		ab8500_fg_check_capacity_limits(di, true);
+		di->init_capacity = false;
+
+		queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+	} else if (di->flags.user_cap) {
+		if (check_sysfs_capacity(di)) {
+			ab8500_fg_check_capacity_limits(di, true);
+			if (di->flags.charging)
+				ab8500_fg_charge_state_to(di,
+					AB8500_FG_CHARGE_INIT);
+			else
+				ab8500_fg_discharge_state_to(di,
+					AB8500_FG_DISCHARGE_READOUT_INIT);
+		}
+		di->flags.user_cap = false;
+		queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+	} else
+		ab8500_fg_algorithm(di);
+
+}
+
+/**
+ * ab8500_fg_check_hw_failure_work() - Check OVV_BAT condition
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for checking the OVV_BAT condition
+ */
+static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
+{
+	int ret;
+	u8 reg_value;
+
+	struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+		fg_check_hw_failure_work.work);
+
+	/*
+	 * If we have had a battery over-voltage situation,
+	 * check ovv-bit to see if it should be reset.
+	 */
+	if (di->flags.bat_ovv) {
+		ret = abx500_get_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_CH_STAT_REG,
+			&reg_value);
+		if (ret < 0) {
+			dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+			return;
+		}
+		if ((reg_value & BATT_OVV) != BATT_OVV) {
+			dev_dbg(di->dev, "Battery recovered from OVV\n");
+			di->flags.bat_ovv = false;
+			power_supply_changed(&di->fg_psy);
+			return;
+		}
+
+		/* Not yet recovered from ovv, reschedule this test */
+		queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work,
+				   round_jiffies(HZ));
+	}
+}
+
+/**
+ * ab8500_fg_low_bat_work() - Check LOW_BAT condition
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for checking the LOW_BAT condition
+ */
+static void ab8500_fg_low_bat_work(struct work_struct *work)
+{
+	int vbat;
+
+	struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+		fg_low_bat_work.work);
+
+	vbat = ab8500_fg_bat_voltage(di);
+
+	/* Check if LOW_BAT still fulfilled */
+	if (vbat < di->bat->fg_params->lowbat_threshold) {
+		di->flags.low_bat = true;
+		dev_warn(di->dev, "Battery voltage still LOW\n");
+
+		/*
+		 * We need to re-schedule this check to be able to detect
+		 * if the voltage increases again during charging
+		 */
+		queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+			round_jiffies(LOW_BAT_CHECK_INTERVAL));
+	} else {
+		di->flags.low_bat = false;
+		dev_warn(di->dev, "Battery voltage OK again\n");
+	}
+
+	/* This is needed to dispatch LOW_BAT */
+	ab8500_fg_check_capacity_limits(di, false);
+
+	/* Set this flag to check if LOW_BAT IRQ still occurs */
+	di->flags.low_bat_delay = false;
+}
+
+/**
+ * ab8500_fg_battok_calc - calculate the bit pattern corresponding
+ * to the target voltage.
+ * @di:       pointer to the ab8500_fg structure
+ * @target:   target voltage
+ *
+ * Returns the bit pattern for the closest step at or below the target
+ * voltage; valid return values are 0-14 (0-BATT_OK_MAX_NR_INCREMENTS).
+ */
+
+static int ab8500_fg_battok_calc(struct ab8500_fg *di, int target)
+{
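+	/*
+	 * The BATT_OK thresholds are programmed in fixed steps of
+	 * BATT_OK_INCREMENT starting at BATT_OK_MIN. Out-of-range targets
+	 * are clamped, and the division below rounds down to the closest
+	 * step at or below the requested voltage.
+	 */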
+	if (target > BATT_OK_MIN +
+		(BATT_OK_INCREMENT * BATT_OK_MAX_NR_INCREMENTS))
+		return BATT_OK_MAX_NR_INCREMENTS;
+	if (target < BATT_OK_MIN)
+		return 0;
+	return (target - BATT_OK_MIN) / BATT_OK_INCREMENT;
+}
+
+/**
+ * ab8500_fg_battok_init_hw_register - init battok levels
+ * @di:       pointer to the ab8500_fg structure
+ *
+ */
+
+static int ab8500_fg_battok_init_hw_register(struct ab8500_fg *di)
+{
+	int selected;
+	int sel0;
+	int sel1;
+	int cbp_sel0;
+	int cbp_sel1;
+	int ret;
+	int new_val;
+
+	sel0 = di->bat->fg_params->battok_falling_th_sel0;
+	sel1 = di->bat->fg_params->battok_raising_th_sel1;
+
+	cbp_sel0 = ab8500_fg_battok_calc(di, sel0);
+	cbp_sel1 = ab8500_fg_battok_calc(di, sel1);
+
+	selected = BATT_OK_MIN + cbp_sel0 * BATT_OK_INCREMENT;
+
+	if (selected != sel0)
+		dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
+			sel0, selected, cbp_sel0);
+
+	selected = BATT_OK_MIN + cbp_sel1 * BATT_OK_INCREMENT;
+
+	if (selected != sel1)
+		dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
+			sel1, selected, cbp_sel1);
+
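+	/*
+	 * Pack the two 4-bit selections: sel0 (falling threshold) in the
+	 * low nibble, sel1 (rising threshold) in the high nibble.
+	 */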
+	new_val = cbp_sel0 | (cbp_sel1 << 4);
+
+	dev_dbg(di->dev, "using: %x %d %d\n", new_val, cbp_sel0, cbp_sel1);
+	ret = abx500_set_register_interruptible(di->dev, AB8500_SYS_CTRL2_BLOCK,
+		AB8500_BATT_OK_REG, new_val);
+	return ret;
+}
+
+/**
+ * ab8500_fg_instant_work() - Run the FG state machine instantly
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for instant work
+ */
+static void ab8500_fg_instant_work(struct work_struct *work)
+{
+	struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_work);
+
+	ab8500_fg_algorithm(di);
+}
+
+/**
+ * ab8500_fg_cc_data_end_handler() - isr signalling end of CC data conversion.
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_data_end_handler(int irq, void *_di)
+{
+	struct ab8500_fg *di = _di;
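+	/* Wake up any blocking instantaneous current read waiting on this */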
+	complete(&di->ab8500_fg_complete);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_cc_int_calib_handler() - isr for CC internal calibration done.
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_int_calib_handler(int irq, void *_di)
+{
+	struct ab8500_fg *di = _di;
+	di->calib_state = AB8500_FG_CALIB_END;
+	queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_cc_convend_handler() - isr to get battery avg current.
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_convend_handler(int irq, void *_di)
+{
+	struct ab8500_fg *di = _di;
+
+	queue_work(di->fg_wq, &di->fg_acc_cur_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_batt_ovv_handler() - Battery OVV occurred
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
+{
+	struct ab8500_fg *di = _di;
+
+	dev_dbg(di->dev, "Battery OVV\n");
+	di->flags.bat_ovv = true;
+	power_supply_changed(&di->fg_psy);
+
+	/* Schedule a new HW failure check */
+	queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 0);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_lowbatf_handler() - Battery voltage is below LOW threshold
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
+{
+	struct ab8500_fg *di = _di;
+
+	if (!di->flags.low_bat_delay) {
+		dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
+		di->flags.low_bat_delay = true;
+		/*
+		 * Start a timer to check LOW_BAT again after some time
+		 * This is done to avoid shutdown on single voltage dips
+		 */
+		queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+			round_jiffies(LOW_BAT_CHECK_INTERVAL));
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_get_property() - get the fg properties
+ * @psy:	pointer to the power_supply structure
+ * @psp:	pointer to the power_supply_property structure
+ * @val:	pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * fg properties by reading the sysfs files.
+ * voltage_now:		battery voltage
+ * current_now:		battery instant current
+ * current_avg:		battery average current
+ * charge_full_design:	design capacity of the battery
+ * charge_now:		battery capacity in uAh
+ * capacity:		capacity in percent
+ * capacity_level:	capacity level
+ *
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_fg_get_property(struct power_supply *psy,
+	enum power_supply_property psp,
+	union power_supply_propval *val)
+{
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	/*
+	 * If battery is identified as unknown and charging of unknown
+	 * batteries is disabled, we always report 100% capacity and
+	 * capacity level UNKNOWN, since we can't calculate
+	 * remaining capacity
+	 */
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		if (di->flags.bat_ovv)
+			val->intval = BATT_OVV_VALUE * 1000;
+		else
+			val->intval = di->vbat * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = di->inst_curr * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_AVG:
+		val->intval = di->avg_curr * 1000;
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+		val->intval = ab8500_fg_convert_mah_to_uwh(di,
+				di->bat_cap.max_mah_design);
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_FULL:
+		val->intval = ab8500_fg_convert_mah_to_uwh(di,
+				di->bat_cap.max_mah);
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_NOW:
+		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+				di->flags.batt_id_received)
+			val->intval = ab8500_fg_convert_mah_to_uwh(di,
+					di->bat_cap.max_mah);
+		else
+			val->intval = ab8500_fg_convert_mah_to_uwh(di,
+					di->bat_cap.prev_mah);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		val->intval = di->bat_cap.max_mah_design;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		val->intval = di->bat_cap.max_mah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+				di->flags.batt_id_received)
+			val->intval = di->bat_cap.max_mah;
+		else
+			val->intval = di->bat_cap.prev_mah;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+				di->flags.batt_id_received)
+			val->intval = 100;
+		else
+			val->intval = di->bat_cap.prev_percent;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+				di->flags.batt_id_received)
+			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+		else
+			val->intval = di->bat_cap.prev_level;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
+{
+	struct power_supply *psy;
+	struct power_supply *ext;
+	struct ab8500_fg *di;
+	union power_supply_propval ret;
+	int i, j;
+	bool psy_found = false;
+
+	psy = (struct power_supply *)data;
+	ext = dev_get_drvdata(dev);
+	di = to_ab8500_fg_device_info(psy);
+
+	/*
+	 * For all psy where the name of your driver
+	 * appears in any supplied_to
+	 */
+	for (i = 0; i < ext->num_supplicants; i++) {
+		if (!strcmp(ext->supplied_to[i], psy->name))
+			psy_found = true;
+	}
+
+	if (!psy_found)
+		return 0;
+
+	/* Go through all properties for the psy */
+	for (j = 0; j < ext->num_properties; j++) {
+		enum power_supply_property prop;
+		prop = ext->properties[j];
+
+		if (ext->get_property(ext, prop, &ret))
+			continue;
+
+		switch (prop) {
+		case POWER_SUPPLY_PROP_STATUS:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				switch (ret.intval) {
+				case POWER_SUPPLY_STATUS_UNKNOWN:
+				case POWER_SUPPLY_STATUS_DISCHARGING:
+				case POWER_SUPPLY_STATUS_NOT_CHARGING:
+					if (!di->flags.charging)
+						break;
+					di->flags.charging = false;
+					di->flags.fully_charged = false;
+					queue_work(di->fg_wq, &di->fg_work);
+					break;
+				case POWER_SUPPLY_STATUS_FULL:
+					if (di->flags.fully_charged)
+						break;
+					di->flags.fully_charged = true;
+					di->flags.force_full = true;
+					/* Save current capacity as maximum */
+					di->bat_cap.max_mah = di->bat_cap.mah;
+					queue_work(di->fg_wq, &di->fg_work);
+					break;
+				case POWER_SUPPLY_STATUS_CHARGING:
+					if (di->flags.charging)
+						break;
+					di->flags.charging = true;
+					di->flags.fully_charged = false;
+					queue_work(di->fg_wq, &di->fg_work);
+					break;
+				}
+			default:
+				break;
+			}
+			break;
+		case POWER_SUPPLY_PROP_TECHNOLOGY:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				if (!di->flags.batt_id_received) {
+					const struct abx500_battery_type *b;
+
+					b = &(di->bat->bat_type[di->bat->batt_id]);
+
+					di->flags.batt_id_received = true;
+
+					di->bat_cap.max_mah_design =
+						MILLI_TO_MICRO *
+						b->charge_full_design;
+
+					di->bat_cap.max_mah =
+						di->bat_cap.max_mah_design;
+
+					di->vbat_nom = b->nominal_voltage;
+				}
+
+				if (ret.intval)
+					di->flags.batt_unknown = false;
+				else
+					di->flags.batt_unknown = true;
+				break;
+			default:
+				break;
+			}
+			break;
+		case POWER_SUPPLY_PROP_TEMP:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				if (di->flags.batt_id_received)
+					di->bat_temp = ret.intval;
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * ab8500_fg_init_hw_registers() - Set up FG related registers
+ * @di:		pointer to the ab8500_fg structure
+ *
+ * Set up battery OVV, low battery voltage registers
+ */
+static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
+{
+	int ret;
+
+	/* Set VBAT OVV threshold */
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+		AB8500_CHARGER,
+		AB8500_BATT_OVV,
+		BATT_OVV_TH_4P75,
+		BATT_OVV_TH_4P75);
+	if (ret) {
+		dev_err(di->dev, "failed to set BATT_OVV\n");
+		goto out;
+	}
+
+	/* Enable VBAT OVV detection */
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+		AB8500_CHARGER,
+		AB8500_BATT_OVV,
+		BATT_OVV_ENA,
+		BATT_OVV_ENA);
+	if (ret) {
+		dev_err(di->dev, "failed to enable BATT_OVV\n");
+		goto out;
+	}
+
+	/* Low Battery Voltage */
+	ret = abx500_set_register_interruptible(di->dev,
+		AB8500_SYS_CTRL2_BLOCK,
+		AB8500_LOW_BAT_REG,
+		ab8500_volt_to_regval(
+			di->bat->fg_params->lowbat_threshold) << 1 |
+		LOW_BAT_ENABLE);
+	if (ret) {
+		dev_err(di->dev, "%s write failed\n", __func__);
+		goto out;
+	}
+
+	/* Battery OK threshold */
+	ret = ab8500_fg_battok_init_hw_register(di);
+	if (ret) {
+		dev_err(di->dev, "BattOk init write failed.\n");
+		goto out;
+	}
+out:
+	return ret;
+}
+
+/**
+ * ab8500_fg_external_power_changed() - callback for power supply changes
+ * @psy:       pointer to the structure power_supply
+ *
+ * This function is the entry point of the pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab8500_fg_external_power_changed(struct power_supply *psy)
+{
+	struct ab8500_fg *di = to_ab8500_fg_device_info(psy);
+
+	class_for_each_device(power_supply_class, NULL,
+		&di->fg_psy, ab8500_fg_get_ext_psy_data);
+}
+
+/**
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
+ * @work:	pointer to the work_struct structure
+ *
+ * Used to reset the current battery capacity to be able to
+ * retrigger a new voltage based capacity calculation. For
+ * test and verification purposes.
+ */
+static void ab8500_fg_reinit_work(struct work_struct *work)
+{
+	struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+		fg_reinit_work.work);
+
+	if (di->flags.calibrate == false) {
+		dev_dbg(di->dev, "Resetting FG state machine to init.\n");
+		ab8500_fg_clear_cap_samples(di);
+		ab8500_fg_calc_cap_discharge_voltage(di, true);
+		ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+		ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+		queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+	} else {
+		dev_err(di->dev, "Residual offset calibration ongoing, "
+			"retrying...\n");
+		/* Wait one second until next try*/
+		queue_delayed_work(di->fg_wq, &di->fg_reinit_work,
+			round_jiffies(1));
+	}
+}
+
+/**
+ * ab8500_fg_reinit() - forces FG algorithm to reinitialize with current values
+ *
+ * This function can be used to force the FG algorithm to recalculate a new
+ * voltage based battery capacity.
+ */
+void ab8500_fg_reinit(void)
+{
+	struct ab8500_fg *di = ab8500_fg_get();
+	/* The user won't be notified if a null pointer is returned. */
+	if (di != NULL)
+		queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
+}
+
+/* Exposure to the sysfs interface */
+
+struct ab8500_fg_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct ab8500_fg *, char *);
+	ssize_t (*store)(struct ab8500_fg *, const char *, size_t);
+};
+
+static ssize_t charge_full_show(struct ab8500_fg *di, char *buf)
+{
+	return sprintf(buf, "%d\n", di->bat_cap.max_mah);
+}
+
+static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
+				 size_t count)
+{
+	unsigned long charge_full;
+	ssize_t ret = -EINVAL;
+
+	ret = strict_strtoul(buf, 10, &charge_full);
+
+	dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full);
+
+	if (!ret) {
+		di->bat_cap.max_mah = (int) charge_full;
+		ret = count;
+	}
+	return ret;
+}
+
+static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
+{
+	return sprintf(buf, "%d\n", di->bat_cap.prev_mah);
+}
+
+static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
+				 size_t count)
+{
+	unsigned long charge_now;
+	ssize_t ret;
+
+	ret = strict_strtoul(buf, 10, &charge_now);
+
+	dev_dbg(di->dev, "Ret %zd charge_now %lu was %d",
+		ret, charge_now, di->bat_cap.prev_mah);
+
+	if (!ret) {
+		di->bat_cap.user_mah = (int) charge_now;
+		di->flags.user_cap = true;
+		ret = count;
+		queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+	}
+	return ret;
+}
+
+static struct ab8500_fg_sysfs_entry charge_full_attr =
+	__ATTR(charge_full, 0644, charge_full_show, charge_full_store);
+
+static struct ab8500_fg_sysfs_entry charge_now_attr =
+	__ATTR(charge_now, 0644, charge_now_show, charge_now_store);
+
+static ssize_t
+ab8500_fg_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct ab8500_fg_sysfs_entry *entry;
+	struct ab8500_fg *di;
+
+	entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+	di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+	if (!entry->show)
+		return -EIO;
+
+	return entry->show(di, buf);
+}
+static ssize_t
+ab8500_fg_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+		size_t count)
+{
+	struct ab8500_fg_sysfs_entry *entry;
+	struct ab8500_fg *di;
+
+	entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+	di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+	if (!entry->store)
+		return -EIO;
+
+	return entry->store(di, buf, count);
+}
+
+static const struct sysfs_ops ab8500_fg_sysfs_ops = {
+	.show = ab8500_fg_show,
+	.store = ab8500_fg_store,
+};
+
+static struct attribute *ab8500_fg_attrs[] = {
+	&charge_full_attr.attr,
+	&charge_now_attr.attr,
+	NULL,
+};
+
+static struct kobj_type ab8500_fg_ktype = {
+	.sysfs_ops = &ab8500_fg_sysfs_ops,
+	.default_attrs = ab8500_fg_attrs,
+};
+
+/**
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
+ * @di:                pointer to the struct ab8500_fg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
+{
+	kobject_del(&di->fg_kobject);
+}
+
+/**
+ * ab8500_fg_sysfs_init() - init of sysfs entry
+ * @di:                pointer to the struct ab8500_fg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_fg_sysfs_init(struct ab8500_fg *di)
+{
+	int ret = 0;
+
+	ret = kobject_init_and_add(&di->fg_kobject,
+		&ab8500_fg_ktype,
+		NULL, "battery");
+	if (ret < 0)
+		dev_err(di->dev, "failed to create sysfs entry\n");
+
+	return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int ab8500_fg_resume(struct platform_device *pdev)
+{
+	struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+	/*
+	 * Change state if we're not charging. If we're charging we will wake
+	 * up on the FG IRQ
+	 */
+	if (!di->flags.charging) {
+		ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_WAKEUP);
+		queue_work(di->fg_wq, &di->fg_work);
+	}
+
+	return 0;
+}
+
+static int ab8500_fg_suspend(struct platform_device *pdev,
+	pm_message_t state)
+{
+	struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+	flush_delayed_work(&di->fg_periodic_work);
+
+	/*
+	 * If the FG is enabled we will disable it before going to suspend
+	 * only if we're not charging
+	 */
+	if (di->flags.fg_enabled && !di->flags.charging)
+		ab8500_fg_coulomb_counter(di, false);
+
+	return 0;
+}
+#else
+#define ab8500_fg_suspend      NULL
+#define ab8500_fg_resume       NULL
+#endif
+
+static int __devexit ab8500_fg_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+	list_del(&di->node);
+
+	/* Disable coulomb counter */
+	ret = ab8500_fg_coulomb_counter(di, false);
+	if (ret)
+		dev_err(di->dev, "failed to disable coulomb counter\n");
+
+	destroy_workqueue(di->fg_wq);
+	ab8500_fg_sysfs_exit(di);
+
+	flush_scheduled_work();
+	power_supply_unregister(&di->fg_psy);
+	platform_set_drvdata(pdev, NULL);
+	kfree(di);
+	return ret;
+}
+
+/* ab8500 fg driver interrupts and their respective isr */
+static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
+	{"NCONV_ACCU", ab8500_fg_cc_convend_handler},
+	{"BATT_OVV", ab8500_fg_batt_ovv_handler},
+	{"LOW_BAT_F", ab8500_fg_lowbatf_handler},
+	{"CC_INT_CALIB", ab8500_fg_cc_int_calib_handler},
+	{"CCEOC", ab8500_fg_cc_data_end_handler},
+};
+
+static int __devinit ab8500_fg_probe(struct platform_device *pdev)
+{
+	int i, irq;
+	int ret = 0;
+	struct abx500_bm_plat_data *plat_data;
+
+	struct ab8500_fg *di =
+		kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+	if (!di)
+		return -ENOMEM;
+
+	mutex_init(&di->cc_lock);
+
+	/* get parent data */
+	di->dev = &pdev->dev;
+	di->parent = dev_get_drvdata(pdev->dev.parent);
+	di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+	/* get fg specific platform data */
+	plat_data = pdev->dev.platform_data;
+	di->pdata = plat_data->fg;
+	if (!di->pdata) {
+		dev_err(di->dev, "no fg platform data supplied\n");
+		ret = -EINVAL;
+		goto free_device_info;
+	}
+
+	/* get battery specific platform data */
+	di->bat = plat_data->battery;
+	if (!di->bat) {
+		dev_err(di->dev, "no battery platform data supplied\n");
+		ret = -EINVAL;
+		goto free_device_info;
+	}
+
+	di->fg_psy.name = "ab8500_fg";
+	di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+	di->fg_psy.properties = ab8500_fg_props;
+	di->fg_psy.num_properties = ARRAY_SIZE(ab8500_fg_props);
+	di->fg_psy.get_property = ab8500_fg_get_property;
+	di->fg_psy.supplied_to = di->pdata->supplied_to;
+	di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+	di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
+
+	di->bat_cap.max_mah_design = MILLI_TO_MICRO *
+		di->bat->bat_type[di->bat->batt_id].charge_full_design;
+
+	di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+
+	di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+
+	di->init_capacity = true;
+
+	ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+	ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+
+	/* Create a work queue for running the FG algorithm */
+	di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
+	if (di->fg_wq == NULL) {
+		dev_err(di->dev, "failed to create work queue\n");
+		goto free_device_info;
+	}
+
+	/* Init work for running the fg algorithm instantly */
+	INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
+
+	/* Init work for getting the battery accumulated current */
+	INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
+
+	/* Init work for reinitialising the fg algorithm */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+		ab8500_fg_reinit_work);
+
+	/* Work delayed Queue to run the state machine */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+		ab8500_fg_periodic_work);
+
+	/* Work to check low battery condition */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+		ab8500_fg_low_bat_work);
+
+	/* Init work for HW failure check */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
+		ab8500_fg_check_hw_failure_work);
+
+	/* Initialize OVV, and other registers */
+	ret = ab8500_fg_init_hw_registers(di);
+	if (ret) {
+		dev_err(di->dev, "failed to initialize registers\n");
+		goto free_inst_curr_wq;
+	}
+
+	/* Consider battery unknown until we're informed otherwise */
+	di->flags.batt_unknown = true;
+	di->flags.batt_id_received = false;
+
+	/* Register FG power supply class */
+	ret = power_supply_register(di->dev, &di->fg_psy);
+	if (ret) {
+		dev_err(di->dev, "failed to register FG psy\n");
+		goto free_inst_curr_wq;
+	}
+
+	di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+	ab8500_fg_coulomb_counter(di, true);
+
+	/* Initialize completion used to notify completion of inst current */
+	init_completion(&di->ab8500_fg_complete);
+
+	/* Register interrupts */
+	for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
+		irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+		ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
+			IRQF_SHARED | IRQF_NO_SUSPEND,
+			ab8500_fg_irq[i].name, di);
+
+		if (ret != 0) {
+			dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+				, ab8500_fg_irq[i].name, irq, ret);
+			goto free_irq;
+		}
+		dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+			ab8500_fg_irq[i].name, irq, ret);
+	}
+	di->irq = platform_get_irq_byname(pdev, "CCEOC");
+	disable_irq(di->irq);
+
+	platform_set_drvdata(pdev, di);
+
+	ret = ab8500_fg_sysfs_init(di);
+	if (ret) {
+		dev_err(di->dev, "failed to create sysfs entry\n");
+		goto free_irq;
+	}
+
+	/* Calibrate the fg first time */
+	di->flags.calibrate = true;
+	di->calib_state = AB8500_FG_CALIB_INIT;
+
+	/*
+	 * Use room temperature, 21.0 C (in tenths of a degree Celsius), as
+	 * the default value until we get an update from the driver.
+	 */
+	di->bat_temp = 210;
+
+	/* Run the FG algorithm */
+	queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+	list_add_tail(&di->node, &ab8500_fg_list);
+
+	return ret;
+
+free_irq:
+	power_supply_unregister(&di->fg_psy);
+
+	/* We also have to free all successfully registered irqs */
+	for (i = i - 1; i >= 0; i--) {
+		irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+		free_irq(irq, di);
+	}
+free_inst_curr_wq:
+	destroy_workqueue(di->fg_wq);
+free_device_info:
+	kfree(di);
+
+	return ret;
+}
+
+static struct platform_driver ab8500_fg_driver = {
+	.probe = ab8500_fg_probe,
+	.remove = __devexit_p(ab8500_fg_remove),
+	.suspend = ab8500_fg_suspend,
+	.resume = ab8500_fg_resume,
+	.driver = {
+		.name = "ab8500-fg",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init ab8500_fg_init(void)
+{
+	return platform_driver_register(&ab8500_fg_driver);
+}
+
+static void __exit ab8500_fg_exit(void)
+{
+	platform_driver_unregister(&ab8500_fg_driver);
+}
+
+subsys_initcall_sync(ab8500_fg_init);
+module_exit(ab8500_fg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-fg");
+MODULE_DESCRIPTION("AB8500 Fuel Gauge driver");
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
new file mode 100644
index 0000000..804b88c
--- /dev/null
+++ b/drivers/power/abx500_chargalg.c
@@ -0,0 +1,1921 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Charging algorithm driver for abx500 variants
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ *	Johan Palsson <johan.palsson@stericsson.com>
+ *	Karl Komierowski <karl.komierowski@stericsson.com>
+ *	Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL			(6 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT			10
+
+/* Recharge criteria counter */
+#define RCH_COND_CNT			3
+
+#define to_abx500_chargalg_device_info(x) container_of((x), \
+	struct abx500_chargalg, chargalg_psy);
+
+enum abx500_chargers {
+	NO_CHG,
+	AC_CHG,
+	USB_CHG,
+};
+
+struct abx500_chargalg_charger_info {
+	enum abx500_chargers conn_chg;
+	enum abx500_chargers prev_conn_chg;
+	enum abx500_chargers online_chg;
+	enum abx500_chargers prev_online_chg;
+	enum abx500_chargers charger_type;
+	bool usb_chg_ok;
+	bool ac_chg_ok;
+	int usb_volt;
+	int usb_curr;
+	int ac_volt;
+	int ac_curr;
+	int usb_vset;
+	int usb_iset;
+	int ac_vset;
+	int ac_iset;
+};
+
+struct abx500_chargalg_suspension_status {
+	bool suspended_change;
+	bool ac_suspended;
+	bool usb_suspended;
+};
+
+struct abx500_chargalg_battery_data {
+	int temp;
+	int volt;
+	int avg_curr;
+	int inst_curr;
+	int percent;
+};
+
+enum abx500_chargalg_states {
+	STATE_HANDHELD_INIT,
+	STATE_HANDHELD,
+	STATE_CHG_NOT_OK_INIT,
+	STATE_CHG_NOT_OK,
+	STATE_HW_TEMP_PROTECT_INIT,
+	STATE_HW_TEMP_PROTECT,
+	STATE_NORMAL_INIT,
+	STATE_NORMAL,
+	STATE_WAIT_FOR_RECHARGE_INIT,
+	STATE_WAIT_FOR_RECHARGE,
+	STATE_MAINTENANCE_A_INIT,
+	STATE_MAINTENANCE_A,
+	STATE_MAINTENANCE_B_INIT,
+	STATE_MAINTENANCE_B,
+	STATE_TEMP_UNDEROVER_INIT,
+	STATE_TEMP_UNDEROVER,
+	STATE_TEMP_LOWHIGH_INIT,
+	STATE_TEMP_LOWHIGH,
+	STATE_SUSPENDED_INIT,
+	STATE_SUSPENDED,
+	STATE_OVV_PROTECT_INIT,
+	STATE_OVV_PROTECT,
+	STATE_SAFETY_TIMER_EXPIRED_INIT,
+	STATE_SAFETY_TIMER_EXPIRED,
+	STATE_BATT_REMOVED_INIT,
+	STATE_BATT_REMOVED,
+	STATE_WD_EXPIRED_INIT,
+	STATE_WD_EXPIRED,
+};
+
+static const char *states[] = {
+	"HANDHELD_INIT",
+	"HANDHELD",
+	"CHG_NOT_OK_INIT",
+	"CHG_NOT_OK",
+	"HW_TEMP_PROTECT_INIT",
+	"HW_TEMP_PROTECT",
+	"NORMAL_INIT",
+	"NORMAL",
+	"WAIT_FOR_RECHARGE_INIT",
+	"WAIT_FOR_RECHARGE",
+	"MAINTENANCE_A_INIT",
+	"MAINTENANCE_A",
+	"MAINTENANCE_B_INIT",
+	"MAINTENANCE_B",
+	"TEMP_UNDEROVER_INIT",
+	"TEMP_UNDEROVER",
+	"TEMP_LOWHIGH_INIT",
+	"TEMP_LOWHIGH",
+	"SUSPENDED_INIT",
+	"SUSPENDED",
+	"OVV_PROTECT_INIT",
+	"OVV_PROTECT",
+	"SAFETY_TIMER_EXPIRED_INIT",
+	"SAFETY_TIMER_EXPIRED",
+	"BATT_REMOVED_INIT",
+	"BATT_REMOVED",
+	"WD_EXPIRED_INIT",
+	"WD_EXPIRED",
+};
+
+struct abx500_chargalg_events {
+	bool batt_unknown;
+	bool mainextchnotok;
+	bool batt_ovv;
+	bool batt_rem;
+	bool btemp_underover;
+	bool btemp_lowhigh;
+	bool main_thermal_prot;
+	bool usb_thermal_prot;
+	bool main_ovv;
+	bool vbus_ovv;
+	bool usbchargernotok;
+	bool safety_timer_expired;
+	bool maintenance_timer_expired;
+	bool ac_wd_expired;
+	bool usb_wd_expired;
+	bool ac_cv_active;
+	bool usb_cv_active;
+	bool vbus_collapsed;
+};
+
+/**
+ * struct abx500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset:	the non optimized/maximised charger current
+ * @current_iset:	the charging current used at this moment
+ * @test_delta_i:	the delta between the current we want to charge and the
+ *			current that is really going into the battery
+ * @condition_cnt:	number of iterations needed before a new charger current
+ *			is set
+ * @max_current:	maximum charger current
+ * @wait_cnt:		to avoid too fast current step down in case of charger
+ *			voltage collapse, we insert this delay between step
+ *			down
+ * @level:		tells in how many steps the charging current has been
+ *			increased
+ */
+struct abx500_charge_curr_maximization {
+	int original_iset;
+	int current_iset;
+	int test_delta_i;
+	int condition_cnt;
+	int max_current;
+	int wait_cnt;
+	u8 level;
+};
+
+enum maxim_ret {
+	MAXIM_RET_NOACTION,
+	MAXIM_RET_CHANGE,
+	MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+/**
+ * struct abx500_chargalg - abx500 Charging algorithm device information
+ * @dev:		pointer to the structure device
+ * @charge_status:	battery operating status
+ * @eoc_cnt:		counter used to determine end-of_charge
+ * @rch_cnt:		counter used to determine start of recharge
+ * @maintenance_chg:	indicate if maintenance charge is active
+ * @t_hyst_norm:	temperature hysteresis when the temperature has been
+ *			over or under normal limits
+ * @t_hyst_lowhigh:	temperature hysteresis when the temperature has been
+ *			over or under the high or low limits
+ * @charge_state:	current state of the charging algorithm
+ * @ccm:		charging current maximization parameters
+ * @chg_info:		information about connected charger types
+ * @batt_data:		data of the battery
+ * @susp_status:	current charger suspension status
+ * @pdata:		pointer to the abx500_chargalg platform data
+ * @bat:		pointer to the abx500_bm platform data
+ * @chargalg_psy:	structure that holds the battery properties exposed by
+ *			the charging algorithm
+ * @events:		structure for information about events triggered
+ * @chargalg_wq:		work queue for running the charging algorithm
+ * @chargalg_periodic_work:	work to run the charging algorithm periodically
+ * @chargalg_wd_work:		work to kick the charger watchdog periodically
+ * @chargalg_work:		work to run the charging algorithm instantly
+ * @safety_timer:		charging safety timer
+ * @maintenance_timer:		maintenance charging timer
+ * @chargalg_kobject:		structure of type kobject
+ */
+struct abx500_chargalg {
+	struct device *dev;
+	int charge_status;
+	int eoc_cnt;
+	int rch_cnt;
+	bool maintenance_chg;
+	int t_hyst_norm;
+	int t_hyst_lowhigh;
+	enum abx500_chargalg_states charge_state;
+	struct abx500_charge_curr_maximization ccm;
+	struct abx500_chargalg_charger_info chg_info;
+	struct abx500_chargalg_battery_data batt_data;
+	struct abx500_chargalg_suspension_status susp_status;
+	struct abx500_chargalg_platform_data *pdata;
+	struct abx500_bm_data *bat;
+	struct power_supply chargalg_psy;
+	struct ux500_charger *ac_chg;
+	struct ux500_charger *usb_chg;
+	struct abx500_chargalg_events events;
+	struct workqueue_struct *chargalg_wq;
+	struct delayed_work chargalg_periodic_work;
+	struct delayed_work chargalg_wd_work;
+	struct work_struct chargalg_work;
+	struct timer_list safety_timer;
+	struct timer_list maintenance_timer;
+	struct kobject chargalg_kobject;
+};
+
+/* Main battery properties */
+static enum power_supply_property abx500_chargalg_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+};
+
+/**
+ * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @data:	pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static void abx500_chargalg_safety_timer_expired(unsigned long data)
+{
+	struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+	dev_err(di->dev, "Safety timer expired\n");
+	di->events.safety_timer_expired = true;
+
+	/* Trigger execution of the algorithm instantly */
+	queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @data:	pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static void abx500_chargalg_maintenance_timer_expired(unsigned long data)
+{
+
+	struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+	dev_dbg(di->dev, "Maintenance timer expired\n");
+	di->events.maintenance_timer_expired = true;
+
+	/* Trigger execution of the algorithm instantly */
+	queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_state_to() - Change charge state
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void abx500_chargalg_state_to(struct abx500_chargalg *di,
+	enum abx500_chargalg_states state)
+{
+	dev_dbg(di->dev,
+		"State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+		di->charge_state == state ? "NO" : "YES",
+		di->charge_state,
+		states[di->charge_state],
+		state,
+		states[state]);
+
+	di->charge_state = state;
+}
+
+/**
+ * abx500_chargalg_check_charger_connection() - Check charger connection change
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
+{
+	if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+		di->susp_status.suspended_change) {
+		/*
+		 * Charger state changed or suspension
+		 * has changed since last update
+		 */
+		if ((di->chg_info.conn_chg & AC_CHG) &&
+			!di->susp_status.ac_suspended) {
+			dev_dbg(di->dev, "Charging source is AC\n");
+			if (di->chg_info.charger_type != AC_CHG) {
+				di->chg_info.charger_type = AC_CHG;
+				abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+			}
+		} else if ((di->chg_info.conn_chg & USB_CHG) &&
+			!di->susp_status.usb_suspended) {
+			dev_dbg(di->dev, "Charging source is USB\n");
+			di->chg_info.charger_type = USB_CHG;
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		} else if (di->chg_info.conn_chg &&
+			(di->susp_status.ac_suspended ||
+			di->susp_status.usb_suspended)) {
+			dev_dbg(di->dev, "Charging is suspended\n");
+			di->chg_info.charger_type = NO_CHG;
+			abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+		} else {
+			dev_dbg(di->dev, "Charging source is OFF\n");
+			di->chg_info.charger_type = NO_CHG;
+			abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+		}
+		di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+		di->susp_status.suspended_change = false;
+	}
+	return di->chg_info.conn_chg;
+}
+
+/**
+ * abx500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
+{
+	unsigned long timer_expiration = 0;
+
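+	/* The safety timer durations are given in hours; convert to jiffies */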
+	switch (di->chg_info.charger_type) {
+	case AC_CHG:
+		timer_expiration =
+		round_jiffies(jiffies +
+			(di->bat->main_safety_tmr_h * 3600 * HZ));
+		break;
+
+	case USB_CHG:
+		timer_expiration =
+		round_jiffies(jiffies +
+			(di->bat->usb_safety_tmr_h * 3600 * HZ));
+		break;
+
+	default:
+		dev_err(di->dev, "Unknown charger to charge from\n");
+		break;
+	}
+
+	di->events.safety_timer_expired = false;
+	di->safety_timer.expires = timer_expiration;
+	if (!timer_pending(&di->safety_timer))
+		add_timer(&di->safety_timer);
+	else
+		mod_timer(&di->safety_timer, timer_expiration);
+}
+
+/**
+ * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
+{
+	di->events.safety_timer_expired = false;
+	del_timer(&di->safety_timer);
+}
+
+/**
+ * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di:		pointer to the abx500_chargalg structure
+ * @duration:	duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
+	int duration)
+{
+	unsigned long timer_expiration;
+
+	/* Convert from hours to jiffies */
+	timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
+
+	di->events.maintenance_timer_expired = false;
+	di->maintenance_timer.expires = timer_expiration;
+	if (!timer_pending(&di->maintenance_timer))
+		add_timer(&di->maintenance_timer);
+	else
+		mod_timer(&di->maintenance_timer, timer_expiration);
+}
+
+/**
+ * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
+{
+	di->events.maintenance_timer_expired = false;
+	del_timer(&di->maintenance_timer);
+}
+
+/**
+ * abx500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * The charger watchdog has to be kicked periodically whenever the charger is
+ * on, else the ABB will reset the system
+ */
+static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
+{
+	/* Check if charger exists and kick watchdog if charging */
+	if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+			di->chg_info.online_chg & AC_CHG)
+		return di->ac_chg->ops.kick_wd(di->ac_chg);
+	else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+			di->chg_info.online_chg & USB_CHG)
+		return di->usb_chg->ops.kick_wd(di->usb_chg);
+
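+	/* No charger is online; nothing to kick */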
+	return -ENXIO;
+}
+
+/**
+ * abx500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di:		pointer to the abx500_chargalg structure
+ * @enable:	charger on/off
+ * @vset:	requested charger output voltage
+ * @iset:	requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
+	int vset, int iset)
+{
+	if (!di->ac_chg || !di->ac_chg->ops.enable)
+		return -ENXIO;
+
+	/* Select maximum of what both the charger and the battery supports */
+	if (di->ac_chg->max_out_volt)
+		vset = min(vset, di->ac_chg->max_out_volt);
+	if (di->ac_chg->max_out_curr)
+		iset = min(iset, di->ac_chg->max_out_curr);
+
+	di->chg_info.ac_iset = iset;
+	di->chg_info.ac_vset = vset;
+
+	return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di:		pointer to the abx500_chargalg structure
+ * @enable:	charger on/off
+ * @vset:	requested charger output voltage
+ * @iset:	requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
+	int vset, int iset)
+{
+	if (!di->usb_chg || !di->usb_chg->ops.enable)
+		return -ENXIO;
+
+	/* Select maximum of what both the charger and the battery supports */
+	if (di->usb_chg->max_out_volt)
+		vset = min(vset, di->usb_chg->max_out_volt);
+	if (di->usb_chg->max_out_curr)
+		iset = min(iset, di->usb_chg->max_out_curr);
+
+	di->chg_info.usb_iset = iset;
+	di->chg_info.usb_vset = vset;
+
+	return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_update_chg_curr() - Update charger current
+ * @di:		pointer to the abx500_chargalg structure
+ * @iset:	requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use
+ */
+static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
+		int iset)
+{
+	/* Check if charger exists and update current if charging */
+	if (di->ac_chg && di->ac_chg->ops.update_curr &&
+			di->chg_info.charger_type & AC_CHG) {
+		/*
+		 * Select maximum of what both the charger
+		 * and the battery supports
+		 */
+		if (di->ac_chg->max_out_curr)
+			iset = min(iset, di->ac_chg->max_out_curr);
+
+		di->chg_info.ac_iset = iset;
+
+		return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+	} else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+			di->chg_info.charger_type & USB_CHG) {
+		/*
+		 * Select maximum of what both the charger
+		 * and the battery supports
+		 */
+		if (di->usb_chg->max_out_curr)
+			iset = min(iset, di->usb_chg->max_out_curr);
+
+		di->chg_info.usb_iset = iset;
+
+		return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+	}
+
+	return -ENXIO;
+}
+
+/**
+ * abx500_chargalg_stop_charging() - Stop charging
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
+{
+	abx500_chargalg_ac_en(di, false, 0, 0);
+	abx500_chargalg_usb_en(di, false, 0, 0);
+	abx500_chargalg_stop_safety_timer(di);
+	abx500_chargalg_stop_maintenance_timer(di);
+	di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+	di->maintenance_chg = false;
+	cancel_delayed_work(&di->chargalg_wd_work);
+	power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_hold_charging() - Pauses charging
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
+{
+	abx500_chargalg_ac_en(di, false, 0, 0);
+	abx500_chargalg_usb_en(di, false, 0, 0);
+	abx500_chargalg_stop_safety_timer(di);
+	abx500_chargalg_stop_maintenance_timer(di);
+	di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+	di->maintenance_chg = false;
+	cancel_delayed_work(&di->chargalg_wd_work);
+	power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_start_charging() - Start the charger
+ * @di:		pointer to the abx500_chargalg structure
+ * @vset:	requested charger output voltage
+ * @iset:	requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously.
+ */
+static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
+	int vset, int iset)
+{
+	switch (di->chg_info.charger_type) {
+	case AC_CHG:
+		dev_dbg(di->dev,
+			"AC parameters: Vset %d, Ich %d\n", vset, iset);
+		abx500_chargalg_usb_en(di, false, 0, 0);
+		abx500_chargalg_ac_en(di, true, vset, iset);
+		break;
+
+	case USB_CHG:
+		dev_dbg(di->dev,
+			"USB parameters: Vset %d, Ich %d\n", vset, iset);
+		abx500_chargalg_ac_en(di, false, 0, 0);
+		abx500_chargalg_usb_en(di, true, vset, iset);
+		break;
+
+	default:
+		dev_err(di->dev, "Unknown charger to charge from\n");
+		break;
+	}
+}
+
+/**
+ * abx500_chargalg_check_temp() - Check battery temperature ranges
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly
+ */
+static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
+{
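+	/*
+	 * Three temperature windows are handled: normal, low/high (charging
+	 * continues with reduced voltage/current) and under/over (charging
+	 * is stopped). A hysteresis is applied when leaving the outer
+	 * windows to avoid toggling between states.
+	 */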
+	if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
+		di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+		/* Temp OK! */
+		di->events.btemp_underover = false;
+		di->events.btemp_lowhigh = false;
+		di->t_hyst_norm = 0;
+		di->t_hyst_lowhigh = 0;
+	} else {
+		if (((di->batt_data.temp >= di->bat->temp_high) &&
+			(di->batt_data.temp <
+				(di->bat->temp_over - di->t_hyst_lowhigh))) ||
+			((di->batt_data.temp >
+				(di->bat->temp_under + di->t_hyst_lowhigh)) &&
+			(di->batt_data.temp <= di->bat->temp_low))) {
+			/* TEMP minor!!!!! */
+			di->events.btemp_underover = false;
+			di->events.btemp_lowhigh = true;
+			di->t_hyst_norm = di->bat->temp_hysteresis;
+			di->t_hyst_lowhigh = 0;
+		} else if (di->batt_data.temp <= di->bat->temp_under ||
+			di->batt_data.temp >= di->bat->temp_over) {
+			/* TEMP major!!!!! */
+			di->events.btemp_underover = true;
+			di->events.btemp_lowhigh = false;
+			di->t_hyst_norm = 0;
+			di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+		} else {
+		/* Within hysteresis */
+		dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+				"hyst_lowhigh %d, hyst normal %d\n",
+				di->batt_data.temp, di->t_hyst_lowhigh,
+				di->t_hyst_norm);
+		}
+	}
+}
+
+/**
+ * abx500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * Charger voltage is checked against maximum limit
+ */
+static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
+{
+	if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+		di->chg_info.usb_chg_ok = false;
+	else
+		di->chg_info.usb_chg_ok = true;
+
+	if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+		di->chg_info.ac_chg_ok = false;
+	else
+		di->chg_info.ac_chg_ok = true;
+
+}
+
+/**
+ * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria are fulfilled
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * End-of-charge criteria are fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If true, the battery is full
+ */
+static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
+{
+	if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+		di->charge_state == STATE_NORMAL &&
+		!di->maintenance_chg && (di->batt_data.volt >=
+		di->bat->bat_type[di->bat->batt_id].termination_vol ||
+		di->events.usb_cv_active || di->events.ac_cv_active) &&
+		di->batt_data.avg_curr <
+		di->bat->bat_type[di->bat->batt_id].termination_curr &&
+		di->batt_data.avg_curr > 0) {
+		if (++di->eoc_cnt >= EOC_COND_CNT) {
+			di->eoc_cnt = 0;
+			di->charge_status = POWER_SUPPLY_STATUS_FULL;
+			di->maintenance_chg = true;
+			dev_dbg(di->dev, "EOC reached!\n");
+			power_supply_changed(&di->chargalg_psy);
+		} else {
+			dev_dbg(di->dev,
+				" EOC limit reached for the %d"
+				" time, out of %d before EOC\n",
+				di->eoc_cnt,
+				EOC_COND_CNT);
+		}
+	} else {
+		di->eoc_cnt = 0;
+	}
+}
+
+static void init_maxim_chg_curr(struct abx500_chargalg *di)
+{
+	di->ccm.original_iset =
+		di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+	di->ccm.current_iset =
+		di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+	di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
+	di->ccm.max_current = di->bat->maxi->chg_curr;
+	di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+	di->ccm.level = 0;
+}
+
+/**
+ * abx500_chargalg_chg_curr_maxim() - Increase the charger current to
+ *			compensate for the system load
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
+{
+	int delta_i;
+
+	if (!di->bat->maxi->ena_maxi)
+		return MAXIM_RET_NOACTION;
+
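+	/*
+	 * delta_i is the gap between the requested charge current and what
+	 * actually reaches the battery; the difference is drawn by the
+	 * system load
+	 */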
+	delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+	if (di->events.vbus_collapsed) {
+		dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+				di->ccm.wait_cnt);
+		if (di->ccm.wait_cnt == 0) {
+			dev_dbg(di->dev, "lowering current\n");
+			di->ccm.wait_cnt++;
+			di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+			di->ccm.max_current =
+				di->ccm.current_iset - di->ccm.test_delta_i;
+			di->ccm.current_iset = di->ccm.max_current;
+			di->ccm.level--;
+			return MAXIM_RET_CHANGE;
+		} else {
+			dev_dbg(di->dev, "waiting\n");
+			/* Let's go in here twice before lowering curr again */
+			di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+			return MAXIM_RET_NOACTION;
+		}
+	}
+
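+	/* VBUS is no longer collapsed, reset the wait counter */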
+	di->ccm.wait_cnt = 0;
+
+	if (di->batt_data.inst_curr > di->ccm.original_iset) {
+		dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+			" (limit %dmA) (current iset: %dmA)!\n",
+			di->batt_data.inst_curr, di->ccm.original_iset,
+			di->ccm.current_iset);
+
+		if (di->ccm.current_iset == di->ccm.original_iset)
+			return MAXIM_RET_NOACTION;
+
+		di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+		di->ccm.current_iset = di->ccm.original_iset;
+		di->ccm.level = 0;
+
+		return MAXIM_RET_IBAT_TOO_HIGH;
+	}
+
+	if (delta_i > di->ccm.test_delta_i &&
+		(di->ccm.current_iset + di->ccm.test_delta_i) <
+		di->ccm.max_current) {
+		if (di->ccm.condition_cnt-- == 0) {
+			/* Increase the iset with ccm.test_delta_i */
+			di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+			di->ccm.current_iset += di->ccm.test_delta_i;
+			di->ccm.level++;
+			dev_dbg(di->dev, " Maximization needed, increase"
+				" with %d mA to %dmA (Optimal ibat: %d)"
+				" Level %d\n",
+				di->ccm.test_delta_i,
+				di->ccm.current_iset,
+				di->ccm.original_iset,
+				di->ccm.level);
+			return MAXIM_RET_CHANGE;
+		} else {
+			return MAXIM_RET_NOACTION;
+		}
+	} else {
+		di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+		return MAXIM_RET_NOACTION;
+	}
+}
+
+static void handle_maxim_chg_curr(struct abx500_chargalg *di)
+{
+	enum maxim_ret ret;
+	int result;
+
+	ret = abx500_chargalg_chg_curr_maxim(di);
+	switch (ret) {
+	case MAXIM_RET_CHANGE:
+		result = abx500_chargalg_update_chg_curr(di,
+			di->ccm.current_iset);
+		if (result)
+			dev_err(di->dev, "failed to set chg curr\n");
+		break;
+	case MAXIM_RET_IBAT_TOO_HIGH:
+		result = abx500_chargalg_update_chg_curr(di,
+			di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+		if (result)
+			dev_err(di->dev, "failed to set chg curr\n");
+		break;
+
+	case MAXIM_RET_NOACTION:
+	default:
+		/* Do nothing..*/
+		break;
+	}
+}
+
+static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+	struct power_supply *psy;
+	struct power_supply *ext;
+	struct abx500_chargalg *di;
+	union power_supply_propval ret;
+	int i, j;
+	bool psy_found = false;
+
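+	/*
+	 * Called from class_for_each_device() for each registered
+	 * power_supply; 'data' is our own chargalg supply and 'dev' is the
+	 * external supply being examined
+	 */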
+	psy = (struct power_supply *)data;
+	ext = dev_get_drvdata(dev);
+	di = to_abx500_chargalg_device_info(psy);
+	/* For all psy where the driver name appears in any supplied_to */
+	for (i = 0; i < ext->num_supplicants; i++) {
+		if (!strcmp(ext->supplied_to[i], psy->name))
+			psy_found = true;
+	}
+	if (!psy_found)
+		return 0;
+
+	/* Go through all properties for the psy */
+	for (j = 0; j < ext->num_properties; j++) {
+		enum power_supply_property prop;
+		prop = ext->properties[j];
+
+		/* Initialize chargers if not already done */
+		if (!di->ac_chg &&
+			ext->type == POWER_SUPPLY_TYPE_MAINS)
+			di->ac_chg = psy_to_ux500_charger(ext);
+		else if (!di->usb_chg &&
+			ext->type == POWER_SUPPLY_TYPE_USB)
+			di->usb_chg = psy_to_ux500_charger(ext);
+
+		if (ext->get_property(ext, prop, &ret))
+			continue;
+		switch (prop) {
+		case POWER_SUPPLY_PROP_PRESENT:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				/* Battery present */
+				if (ret.intval)
+					di->events.batt_rem = false;
+				/* Battery removed */
+				else
+					di->events.batt_rem = true;
+				break;
+			case POWER_SUPPLY_TYPE_MAINS:
+				/* AC disconnected */
+				if (!ret.intval &&
+					(di->chg_info.conn_chg & AC_CHG)) {
+					di->chg_info.prev_conn_chg =
+						di->chg_info.conn_chg;
+					di->chg_info.conn_chg &= ~AC_CHG;
+				}
+				/* AC connected */
+				else if (ret.intval &&
+					!(di->chg_info.conn_chg & AC_CHG)) {
+					di->chg_info.prev_conn_chg =
+						di->chg_info.conn_chg;
+					di->chg_info.conn_chg |= AC_CHG;
+				}
+				break;
+			case POWER_SUPPLY_TYPE_USB:
+				/* USB disconnected */
+				if (!ret.intval &&
+					(di->chg_info.conn_chg & USB_CHG)) {
+					di->chg_info.prev_conn_chg =
+						di->chg_info.conn_chg;
+					di->chg_info.conn_chg &= ~USB_CHG;
+				}
+				/* USB connected */
+				else if (ret.intval &&
+					!(di->chg_info.conn_chg & USB_CHG)) {
+					di->chg_info.prev_conn_chg =
+						di->chg_info.conn_chg;
+					di->chg_info.conn_chg |= USB_CHG;
+				}
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case POWER_SUPPLY_PROP_ONLINE:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				break;
+			case POWER_SUPPLY_TYPE_MAINS:
+				/* AC offline */
+				if (!ret.intval &&
+					(di->chg_info.online_chg & AC_CHG)) {
+					di->chg_info.prev_online_chg =
+						di->chg_info.online_chg;
+					di->chg_info.online_chg &= ~AC_CHG;
+				}
+				/* AC online */
+				else if (ret.intval &&
+					!(di->chg_info.online_chg & AC_CHG)) {
+					di->chg_info.prev_online_chg =
+						di->chg_info.online_chg;
+					di->chg_info.online_chg |= AC_CHG;
+					queue_delayed_work(di->chargalg_wq,
+						&di->chargalg_wd_work, 0);
+				}
+				break;
+			case POWER_SUPPLY_TYPE_USB:
+				/* USB offline */
+				if (!ret.intval &&
+					(di->chg_info.online_chg & USB_CHG)) {
+					di->chg_info.prev_online_chg =
+						di->chg_info.online_chg;
+					di->chg_info.online_chg &= ~USB_CHG;
+				}
+				/* USB online */
+				else if (ret.intval &&
+					!(di->chg_info.online_chg & USB_CHG)) {
+					di->chg_info.prev_online_chg =
+						di->chg_info.online_chg;
+					di->chg_info.online_chg |= USB_CHG;
+					queue_delayed_work(di->chargalg_wq,
+						&di->chargalg_wd_work, 0);
+				}
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case POWER_SUPPLY_PROP_HEALTH:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				break;
+			case POWER_SUPPLY_TYPE_MAINS:
+				switch (ret.intval) {
+				case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+					di->events.mainextchnotok = true;
+					di->events.main_thermal_prot = false;
+					di->events.main_ovv = false;
+					di->events.ac_wd_expired = false;
+					break;
+				case POWER_SUPPLY_HEALTH_DEAD:
+					di->events.ac_wd_expired = true;
+					di->events.mainextchnotok = false;
+					di->events.main_ovv = false;
+					di->events.main_thermal_prot = false;
+					break;
+				case POWER_SUPPLY_HEALTH_COLD:
+				case POWER_SUPPLY_HEALTH_OVERHEAT:
+					di->events.main_thermal_prot = true;
+					di->events.mainextchnotok = false;
+					di->events.main_ovv = false;
+					di->events.ac_wd_expired = false;
+					break;
+				case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+					di->events.main_ovv = true;
+					di->events.mainextchnotok = false;
+					di->events.main_thermal_prot = false;
+					di->events.ac_wd_expired = false;
+					break;
+				case POWER_SUPPLY_HEALTH_GOOD:
+					di->events.main_thermal_prot = false;
+					di->events.mainextchnotok = false;
+					di->events.main_ovv = false;
+					di->events.ac_wd_expired = false;
+					break;
+				default:
+					break;
+				}
+				break;
+
+			case POWER_SUPPLY_TYPE_USB:
+				switch (ret.intval) {
+				case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+					di->events.usbchargernotok = true;
+					di->events.usb_thermal_prot = false;
+					di->events.vbus_ovv = false;
+					di->events.usb_wd_expired = false;
+					break;
+				case POWER_SUPPLY_HEALTH_DEAD:
+					di->events.usb_wd_expired = true;
+					di->events.usbchargernotok = false;
+					di->events.usb_thermal_prot = false;
+					di->events.vbus_ovv = false;
+					break;
+				case POWER_SUPPLY_HEALTH_COLD:
+				case POWER_SUPPLY_HEALTH_OVERHEAT:
+					di->events.usb_thermal_prot = true;
+					di->events.usbchargernotok = false;
+					di->events.vbus_ovv = false;
+					di->events.usb_wd_expired = false;
+					break;
+				case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+					di->events.vbus_ovv = true;
+					di->events.usbchargernotok = false;
+					di->events.usb_thermal_prot = false;
+					di->events.usb_wd_expired = false;
+					break;
+				case POWER_SUPPLY_HEALTH_GOOD:
+					di->events.usbchargernotok = false;
+					di->events.usb_thermal_prot = false;
+					di->events.vbus_ovv = false;
+					di->events.usb_wd_expired = false;
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				di->batt_data.volt = ret.intval / 1000;
+				break;
+			case POWER_SUPPLY_TYPE_MAINS:
+				di->chg_info.ac_volt = ret.intval / 1000;
+				break;
+			case POWER_SUPPLY_TYPE_USB:
+				di->chg_info.usb_volt = ret.intval / 1000;
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_MAINS:
+				/* AVG is used to indicate when we are
+				 * in CV mode */
+				if (ret.intval)
+					di->events.ac_cv_active = true;
+				else
+					di->events.ac_cv_active = false;
+
+				break;
+			case POWER_SUPPLY_TYPE_USB:
+				/* AVG is used to indicate when we are
+				 * in CV mode */
+				if (ret.intval)
+					di->events.usb_cv_active = true;
+				else
+					di->events.usb_cv_active = false;
+
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case POWER_SUPPLY_PROP_TECHNOLOGY:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				if (ret.intval)
+					di->events.batt_unknown = false;
+				else
+					di->events.batt_unknown = true;
+
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case POWER_SUPPLY_PROP_TEMP:
+			di->batt_data.temp = ret.intval / 10;
+			break;
+
+		case POWER_SUPPLY_PROP_CURRENT_NOW:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_MAINS:
+				di->chg_info.ac_curr = ret.intval / 1000;
+				break;
+			case POWER_SUPPLY_TYPE_USB:
+				di->chg_info.usb_curr = ret.intval / 1000;
+				break;
+			case POWER_SUPPLY_TYPE_BATTERY:
+				di->batt_data.inst_curr = ret.intval / 1000;
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case POWER_SUPPLY_PROP_CURRENT_AVG:
+			switch (ext->type) {
+			case POWER_SUPPLY_TYPE_BATTERY:
+				di->batt_data.avg_curr = ret.intval / 1000;
+				break;
+			case POWER_SUPPLY_TYPE_USB:
+				if (ret.intval)
+					di->events.vbus_collapsed = true;
+				else
+					di->events.vbus_collapsed = false;
+				break;
+			default:
+				break;
+			}
+			break;
+		case POWER_SUPPLY_PROP_CAPACITY:
+			di->batt_data.percent = ret.intval;
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * abx500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy:       pointer to the structure power_supply
+ *
+ * This function implements the external_power_changed callback of the
+ * power_supply structure.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void abx500_chargalg_external_power_changed(struct power_supply *psy)
+{
+	struct abx500_chargalg *di = to_abx500_chargalg_device_info(psy);
+
+	/*
+	 * Trigger execution of the algorithm instantly and read
+	 * all power_supply properties there instead
+	 */
+	queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_algorithm() - Main function for the algorithm
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
+{
+	int charger_status;
+
+	/* Collect data from all power_supply class devices */
+	class_for_each_device(power_supply_class, NULL,
+		&di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
+
+	abx500_chargalg_end_of_charge(di);
+	abx500_chargalg_check_temp(di);
+	abx500_chargalg_check_charger_voltage(di);
+
+	charger_status = abx500_chargalg_check_charger_connection(di);
+	/*
+	 * First check if we have a charger connected.
+	 * Also we don't allow charging of unknown batteries if configured
+	 * this way
+	 */
+	if (!charger_status ||
+		(di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+		if (di->charge_state != STATE_HANDHELD) {
+			di->events.safety_timer_expired = false;
+			abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+		}
+	}
+
+	/* If suspended, we should not continue checking the flags */
+	else if (di->charge_state == STATE_SUSPENDED_INIT ||
+		di->charge_state == STATE_SUSPENDED) {
+		/* We don't do anything here, just don't continue */
+	}
+
+	/* Safety timer expiration */
+	else if (di->events.safety_timer_expired) {
+		if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+			abx500_chargalg_state_to(di,
+				STATE_SAFETY_TIMER_EXPIRED_INIT);
+	}
+	/*
+	 * Check if any interrupts have occurred
+	 * that will prevent us from charging
+	 */
+
+	/* Battery removed */
+	else if (di->events.batt_rem) {
+		if (di->charge_state != STATE_BATT_REMOVED)
+			abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+	}
+	/* Main or USB charger not ok. */
+	else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+		/*
+		 * If vbus_collapsed is set, we have to lower the charger
+		 * current, which is done in the normal state below
+		 */
+		if (di->charge_state != STATE_CHG_NOT_OK &&
+				!di->events.vbus_collapsed)
+			abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+	}
+	/* VBUS, Main or VBAT OVV. */
+	else if (di->events.vbus_ovv ||
+			di->events.main_ovv ||
+			di->events.batt_ovv ||
+			!di->chg_info.usb_chg_ok ||
+			!di->chg_info.ac_chg_ok) {
+		if (di->charge_state != STATE_OVV_PROTECT)
+			abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+	}
+	/* USB Thermal, stop charging */
+	else if (di->events.main_thermal_prot ||
+		di->events.usb_thermal_prot) {
+		if (di->charge_state != STATE_HW_TEMP_PROTECT)
+			abx500_chargalg_state_to(di,
+				STATE_HW_TEMP_PROTECT_INIT);
+	}
+	/* Battery temp over/under */
+	else if (di->events.btemp_underover) {
+		if (di->charge_state != STATE_TEMP_UNDEROVER)
+			abx500_chargalg_state_to(di,
+				STATE_TEMP_UNDEROVER_INIT);
+	}
+	/* Watchdog expired */
+	else if (di->events.ac_wd_expired ||
+		di->events.usb_wd_expired) {
+		if (di->charge_state != STATE_WD_EXPIRED)
+			abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+	}
+	/* Battery temp high/low */
+	else if (di->events.btemp_lowhigh) {
+		if (di->charge_state != STATE_TEMP_LOWHIGH)
+			abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+	}
+
+	dev_dbg(di->dev,
+		"[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+		"State %s Active_chg %d Chg_status %d AC %d USB %d "
+		"AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+		"USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+		di->batt_data.volt,
+		di->batt_data.avg_curr,
+		di->batt_data.inst_curr,
+		di->batt_data.temp,
+		di->batt_data.percent,
+		di->maintenance_chg,
+		states[di->charge_state],
+		di->chg_info.charger_type,
+		di->charge_status,
+		di->chg_info.conn_chg & AC_CHG,
+		di->chg_info.conn_chg & USB_CHG,
+		di->chg_info.online_chg & AC_CHG,
+		di->chg_info.online_chg & USB_CHG,
+		di->events.ac_cv_active,
+		di->events.usb_cv_active,
+		di->chg_info.ac_curr,
+		di->chg_info.usb_curr,
+		di->chg_info.ac_vset,
+		di->chg_info.ac_iset,
+		di->chg_info.usb_vset,
+		di->chg_info.usb_iset);
+
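+	/*
+	 * State machine: each *_INIT state performs its one-time actions
+	 * and then falls through into the corresponding steady state
+	 */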
+	switch (di->charge_state) {
+	case STATE_HANDHELD_INIT:
+		abx500_chargalg_stop_charging(di);
+		di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+		abx500_chargalg_state_to(di, STATE_HANDHELD);
+		/* Intentional fallthrough */
+
+	case STATE_HANDHELD:
+		break;
+
+	case STATE_SUSPENDED_INIT:
+		if (di->susp_status.ac_suspended)
+			abx500_chargalg_ac_en(di, false, 0, 0);
+		if (di->susp_status.usb_suspended)
+			abx500_chargalg_usb_en(di, false, 0, 0);
+		abx500_chargalg_stop_safety_timer(di);
+		abx500_chargalg_stop_maintenance_timer(di);
+		di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+		di->maintenance_chg = false;
+		abx500_chargalg_state_to(di, STATE_SUSPENDED);
+		power_supply_changed(&di->chargalg_psy);
+		/* Intentional fallthrough */
+
+	case STATE_SUSPENDED:
+		/* CHARGING is suspended */
+		break;
+
+	case STATE_BATT_REMOVED_INIT:
+		abx500_chargalg_stop_charging(di);
+		abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
+		/* Intentional fallthrough */
+
+	case STATE_BATT_REMOVED:
+		if (!di->events.batt_rem)
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+
+	case STATE_HW_TEMP_PROTECT_INIT:
+		abx500_chargalg_stop_charging(di);
+		abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+		/* Intentional fallthrough */
+
+	case STATE_HW_TEMP_PROTECT:
+		if (!di->events.main_thermal_prot &&
+				!di->events.usb_thermal_prot)
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+
+	case STATE_OVV_PROTECT_INIT:
+		abx500_chargalg_stop_charging(di);
+		abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
+		/* Intentional fallthrough */
+
+	case STATE_OVV_PROTECT:
+		if (!di->events.vbus_ovv &&
+				!di->events.main_ovv &&
+				!di->events.batt_ovv &&
+				di->chg_info.usb_chg_ok &&
+				di->chg_info.ac_chg_ok)
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+
+	case STATE_CHG_NOT_OK_INIT:
+		abx500_chargalg_stop_charging(di);
+		abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+		/* Intentional fallthrough */
+
+	case STATE_CHG_NOT_OK:
+		if (!di->events.mainextchnotok &&
+				!di->events.usbchargernotok)
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+
+	case STATE_SAFETY_TIMER_EXPIRED_INIT:
+		abx500_chargalg_stop_charging(di);
+		abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+		/* Intentional fallthrough */
+
+	case STATE_SAFETY_TIMER_EXPIRED:
+		/* We exit this state when charger is removed */
+		break;
+
+	case STATE_NORMAL_INIT:
+		abx500_chargalg_start_charging(di,
+			di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
+			di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+		abx500_chargalg_state_to(di, STATE_NORMAL);
+		abx500_chargalg_start_safety_timer(di);
+		abx500_chargalg_stop_maintenance_timer(di);
+		init_maxim_chg_curr(di);
+		di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+		di->eoc_cnt = 0;
+		di->maintenance_chg = false;
+		power_supply_changed(&di->chargalg_psy);
+
+		break;
+
+	case STATE_NORMAL:
+		handle_maxim_chg_curr(di);
+		if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+			di->maintenance_chg) {
+			if (di->bat->no_maintenance)
+				abx500_chargalg_state_to(di,
+					STATE_WAIT_FOR_RECHARGE_INIT);
+			else
+				abx500_chargalg_state_to(di,
+					STATE_MAINTENANCE_A_INIT);
+		}
+		break;
+
+	/* This state will be used when the maintenance state is disabled */
+	case STATE_WAIT_FOR_RECHARGE_INIT:
+		abx500_chargalg_hold_charging(di);
+		abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+		di->rch_cnt = RCH_COND_CNT;
+		/* Intentional fallthrough */
+
+	case STATE_WAIT_FOR_RECHARGE:
+		if (di->batt_data.volt <=
+			di->bat->bat_type[di->bat->batt_id].recharge_vol) {
+			if (di->rch_cnt-- == 0)
+				abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		} else
+			di->rch_cnt = RCH_COND_CNT;
+		break;
+
+	case STATE_MAINTENANCE_A_INIT:
+		abx500_chargalg_stop_safety_timer(di);
+		abx500_chargalg_start_maintenance_timer(di,
+			di->bat->bat_type[
+				di->bat->batt_id].maint_a_chg_timer_h);
+		abx500_chargalg_start_charging(di,
+			di->bat->bat_type[
+				di->bat->batt_id].maint_a_vol_lvl,
+			di->bat->bat_type[
+				di->bat->batt_id].maint_a_cur_lvl);
+		abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+		power_supply_changed(&di->chargalg_psy);
+		/* Intentional fallthrough*/
+
+	case STATE_MAINTENANCE_A:
+		if (di->events.maintenance_timer_expired) {
+			abx500_chargalg_stop_maintenance_timer(di);
+			abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+		}
+		break;
+
+	case STATE_MAINTENANCE_B_INIT:
+		abx500_chargalg_start_maintenance_timer(di,
+			di->bat->bat_type[
+				di->bat->batt_id].maint_b_chg_timer_h);
+		abx500_chargalg_start_charging(di,
+			di->bat->bat_type[
+				di->bat->batt_id].maint_b_vol_lvl,
+			di->bat->bat_type[
+				di->bat->batt_id].maint_b_cur_lvl);
+		abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+		power_supply_changed(&di->chargalg_psy);
+		/* Intentional fallthrough*/
+
+	case STATE_MAINTENANCE_B:
+		if (di->events.maintenance_timer_expired) {
+			abx500_chargalg_stop_maintenance_timer(di);
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		}
+		break;
+
+	case STATE_TEMP_LOWHIGH_INIT:
+		abx500_chargalg_start_charging(di,
+			di->bat->bat_type[
+				di->bat->batt_id].low_high_vol_lvl,
+			di->bat->bat_type[
+				di->bat->batt_id].low_high_cur_lvl);
+		abx500_chargalg_stop_maintenance_timer(di);
+		di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+		abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+		power_supply_changed(&di->chargalg_psy);
+		/* Intentional fallthrough */
+
+	case STATE_TEMP_LOWHIGH:
+		if (!di->events.btemp_lowhigh)
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+
+	case STATE_WD_EXPIRED_INIT:
+		abx500_chargalg_stop_charging(di);
+		abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
+		/* Intentional fallthrough */
+
+	case STATE_WD_EXPIRED:
+		if (!di->events.ac_wd_expired &&
+				!di->events.usb_wd_expired)
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+
+	case STATE_TEMP_UNDEROVER_INIT:
+		abx500_chargalg_stop_charging(di);
+		abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+		/* Intentional fallthrough */
+
+	case STATE_TEMP_UNDEROVER:
+		if (!di->events.btemp_underover)
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+	}
+
+	/* Start charging directly if the new state is a charge state */
+	if (di->charge_state == STATE_NORMAL_INIT ||
+			di->charge_state == STATE_MAINTENANCE_A_INIT ||
+			di->charge_state == STATE_MAINTENANCE_B_INIT)
+		queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm
+ */
+static void abx500_chargalg_periodic_work(struct work_struct *work)
+{
+	struct abx500_chargalg *di = container_of(work,
+		struct abx500_chargalg, chargalg_periodic_work.work);
+
+	abx500_chargalg_algorithm(di);
+
+	/*
+	 * If a charger is connected then the battery has to be monitored
+	 * frequently, else the work can be delayed.
+	 */
+	if (di->chg_info.conn_chg)
+		queue_delayed_work(di->chargalg_wq,
+			&di->chargalg_periodic_work,
+			di->bat->interval_charging * HZ);
+	else
+		queue_delayed_work(di->chargalg_wq,
+			&di->chargalg_periodic_work,
+			di->bat->interval_not_charging * HZ);
+}
+
+/**
+ * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog
+ */
+static void abx500_chargalg_wd_work(struct work_struct *work)
+{
+	int ret;
+	struct abx500_chargalg *di = container_of(work,
+		struct abx500_chargalg, chargalg_wd_work.work);
+
+	dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
+
+	ret = abx500_chargalg_kick_watchdog(di);
+	if (ret < 0)
+		dev_err(di->dev, "failed to kick watchdog\n");
+
+	queue_delayed_work(di->chargalg_wq,
+		&di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * abx500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work:	pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm
+ */
+static void abx500_chargalg_work(struct work_struct *work)
+{
+	struct abx500_chargalg *di = container_of(work,
+		struct abx500_chargalg, chargalg_work);
+
+	abx500_chargalg_algorithm(di);
+}
+
+/**
+ * abx500_chargalg_get_property() - get the chargalg properties
+ * @psy:	pointer to the power_supply structure
+ * @psp:	pointer to the power_supply_property structure
+ * @val:	pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status:     charging/discharging/full/unknown
+ * health:     health of the battery
+ * Returns error code in case of failure else 0 on success
+ */
+static int abx500_chargalg_get_property(struct power_supply *psy,
+	enum power_supply_property psp,
+	union power_supply_propval *val)
+{
+	struct abx500_chargalg *di;
+
+	di = to_abx500_chargalg_device_info(psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = di->charge_status;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		if (di->events.batt_ovv) {
+			val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+		} else if (di->events.btemp_underover) {
+			if (di->batt_data.temp <= di->bat->temp_under)
+				val->intval = POWER_SUPPLY_HEALTH_COLD;
+			else
+				val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+		} else {
+			val->intval = POWER_SUPPLY_HEALTH_GOOD;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Exposure to the sysfs interface */
+
+/**
+ * abx500_chargalg_sysfs_charger() - sysfs store operations
+ * @kobj:      pointer to the struct kobject
+ * @attr:      pointer to the struct attribute
+ * @buf:       buffer that holds the parameter passed from userspace
+ * @length:    length of the parameter passed
+ *
+ * Handles the operation to be performed when parameters are passed from
+ * user space.
+ * Returns the length of the buffer (the input taken from user space) on
+ * success, else an error code on failure.
+ */
+static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
+	struct attribute *attr, const char *buf, size_t length)
+{
+	struct abx500_chargalg *di = container_of(kobj,
+		struct abx500_chargalg, chargalg_kobject);
+	long int param;
+	int ac_usb;
+	int ret;
+	char entry = *attr->name;
+
+	switch (entry) {
+	case 'c':
+		ret = strict_strtol(buf, 10, &param);
+		if (ret < 0)
+			return ret;
+
+		ac_usb = param;
+		switch (ac_usb) {
+		case 0:
+			/* Disable charging */
+			di->susp_status.ac_suspended = true;
+			di->susp_status.usb_suspended = true;
+			di->susp_status.suspended_change = true;
+			/* Trigger a state change */
+			queue_work(di->chargalg_wq,
+				&di->chargalg_work);
+			break;
+		case 1:
+			/* Enable AC Charging */
+			di->susp_status.ac_suspended = false;
+			di->susp_status.suspended_change = true;
+			/* Trigger a state change */
+			queue_work(di->chargalg_wq,
+				&di->chargalg_work);
+			break;
+		case 2:
+			/* Enable USB charging */
+			di->susp_status.usb_suspended = false;
+			di->susp_status.suspended_change = true;
+			/* Trigger a state change */
+			queue_work(di->chargalg_wq,
+				&di->chargalg_work);
+			break;
+		default:
+			dev_info(di->dev, "Wrong input\n"
+				"Enter 0. Disable AC/USB Charging\n"
+				"1. Enable AC charging\n"
+				"2. Enable USB Charging\n");
+		}
+		break;
+	}
+	return strlen(buf);
+}
+
+static struct attribute abx500_chargalg_en_charger = \
+{
+	.name = "chargalg",
+	.mode = S_IWUGO,
+};
+
+static struct attribute *abx500_chargalg_chg[] = {
+	&abx500_chargalg_en_charger,
+	NULL
+};
+
+static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
+	.store = abx500_chargalg_sysfs_charger,
+};
+
+static struct kobj_type abx500_chargalg_ktype = {
+	.sysfs_ops = &abx500_chargalg_sysfs_ops,
+	.default_attrs = abx500_chargalg_chg,
+};
+
+/**
+ * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di:                pointer to the struct abx500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
+{
+	kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * abx500_chargalg_sysfs_init() - init of sysfs entry
+ * @di:                pointer to the struct abx500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns an error code in case of failure, else 0 on success
+ */
+static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
+{
+	int ret = 0;
+
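+	/*
+	 * No parent kobject is given, so the "chargalg" attribute is
+	 * expected to show up directly under /sys, e.g. writing "0" to
+	 * /sys/abx500_chargalg/chargalg suspends both chargers
+	 */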
+	ret = kobject_init_and_add(&di->chargalg_kobject,
+		&abx500_chargalg_ktype,
+		NULL, "abx500_chargalg");
+	if (ret < 0)
+		dev_err(di->dev, "failed to create sysfs entry\n");
+
+	return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int abx500_chargalg_resume(struct platform_device *pdev)
+{
+	struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+	/* Kick charger watchdog if charging (any charger online) */
+	if (di->chg_info.online_chg)
+		queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+	/*
+	 * Run the charging algorithm directly to be sure we don't
+	 * do it too seldom
+	 */
+	queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+	return 0;
+}
+
+static int abx500_chargalg_suspend(struct platform_device *pdev,
+	pm_message_t state)
+{
+	struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+	if (di->chg_info.online_chg)
+		cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+	cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+	return 0;
+}
+#else
+#define abx500_chargalg_suspend      NULL
+#define abx500_chargalg_resume       NULL
+#endif
+
+static int __devexit abx500_chargalg_remove(struct platform_device *pdev)
+{
+	struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+	/* sysfs interface to enable/disable charging from user space */
+	abx500_chargalg_sysfs_exit(di);
+
+	/* Delete the work queue */
+	destroy_workqueue(di->chargalg_wq);
+
+	flush_scheduled_work();
+	power_supply_unregister(&di->chargalg_psy);
+	platform_set_drvdata(pdev, NULL);
+	kfree(di);
+
+	return 0;
+}
+
+static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
+{
+	struct abx500_bm_plat_data *plat_data;
+	int ret = 0;
+
+	struct abx500_chargalg *di =
+		kzalloc(sizeof(struct abx500_chargalg), GFP_KERNEL);
+	if (!di)
+		return -ENOMEM;
+
+	/* get device struct */
+	di->dev = &pdev->dev;
+
+	plat_data = pdev->dev.platform_data;
+	di->pdata = plat_data->chargalg;
+	di->bat = plat_data->battery;
+
+	/* chargalg supply */
+	di->chargalg_psy.name = "abx500_chargalg";
+	di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+	di->chargalg_psy.properties = abx500_chargalg_props;
+	di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props);
+	di->chargalg_psy.get_property = abx500_chargalg_get_property;
+	di->chargalg_psy.supplied_to = di->pdata->supplied_to;
+	di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+	di->chargalg_psy.external_power_changed =
+		abx500_chargalg_external_power_changed;
+
+	/* Initialize safety timer */
+	init_timer(&di->safety_timer);
+	di->safety_timer.function = abx500_chargalg_safety_timer_expired;
+	di->safety_timer.data = (unsigned long) di;
+
+	/* Initialize maintenance timer */
+	init_timer(&di->maintenance_timer);
+	di->maintenance_timer.function =
+		abx500_chargalg_maintenance_timer_expired;
+	di->maintenance_timer.data = (unsigned long) di;
+
+	/* Create a work queue for the chargalg */
+	di->chargalg_wq =
+		create_singlethread_workqueue("abx500_chargalg_wq");
+	if (di->chargalg_wq == NULL) {
+		dev_err(di->dev, "failed to create work queue\n");
+		ret = -ENOMEM;
+		goto free_device_info;
+	}
+
+	/* Init work for chargalg */
+	INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+		abx500_chargalg_periodic_work);
+	INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+		abx500_chargalg_wd_work);
+
+	/* Init work for chargalg */
+	INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
+
+	/* To detect charger at startup */
+	di->chg_info.prev_conn_chg = -1;
+
+	/* Register chargalg power supply class */
+	ret = power_supply_register(di->dev, &di->chargalg_psy);
+	if (ret) {
+		dev_err(di->dev, "failed to register chargalg psy\n");
+		goto free_chargalg_wq;
+	}
+
+	platform_set_drvdata(pdev, di);
+
+	/* sysfs interface to enable/disable charging from user space */
+	ret = abx500_chargalg_sysfs_init(di);
+	if (ret) {
+		dev_err(di->dev, "failed to create sysfs entry\n");
+		goto free_psy;
+	}
+
+	/* Run the charging algorithm */
+	queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+	dev_info(di->dev, "probe success\n");
+	return ret;
+
+free_psy:
+	power_supply_unregister(&di->chargalg_psy);
+free_chargalg_wq:
+	destroy_workqueue(di->chargalg_wq);
+free_device_info:
+	kfree(di);
+
+	return ret;
+}
+
+static struct platform_driver abx500_chargalg_driver = {
+	.probe = abx500_chargalg_probe,
+	.remove = __devexit_p(abx500_chargalg_remove),
+	.suspend = abx500_chargalg_suspend,
+	.resume = abx500_chargalg_resume,
+	.driver = {
+		.name = "abx500-chargalg",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init abx500_chargalg_init(void)
+{
+	return platform_driver_register(&abx500_chargalg_driver);
+}
+
+static void __exit abx500_chargalg_exit(void)
+{
+	platform_driver_unregister(&abx500_chargalg_driver);
+}
+
+module_init(abx500_chargalg_init);
+module_exit(abx500_chargalg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:abx500-chargalg");
+MODULE_DESCRIPTION("abx500 battery charging algorithm");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 88fd971..9eca9f1 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -134,12 +134,11 @@
 	union power_supply_propval val;
 	int ret;
 
-	if (cm->fuel_gauge)
-		ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
-				POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
-	else
+	if (!cm->fuel_gauge)
 		return -ENODEV;
 
+	ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+				POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
 	if (ret)
 		return ret;
 
@@ -245,9 +244,7 @@
 	struct charger_desc *desc = cm->desc;
 
 	/* Ignore if it's redundent command */
-	if (enable && cm->charger_enabled)
-		return 0;
-	if (!enable && !cm->charger_enabled)
+	if (enable == cm->charger_enabled)
 		return 0;
 
 	if (enable) {
@@ -309,9 +306,7 @@
 
 		if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE))
 			return; /* Duplicated. */
-		else
-			strncpy(env_str_save, event, UEVENT_BUF_SIZE);
-
+		strncpy(env_str_save, event, UEVENT_BUF_SIZE);
 		return;
 	}
 
@@ -387,8 +382,10 @@
 
 	mutex_lock(&cm_list_mtx);
 
-	list_for_each_entry(cm, &cm_list, entry)
-		stop = stop || _cm_monitor(cm);
+	list_for_each_entry(cm, &cm_list, entry) {
+		if (_cm_monitor(cm))
+			stop = true;
+	}
 
 	mutex_unlock(&cm_list_mtx);
 
@@ -402,7 +399,8 @@
 	struct charger_manager *cm = container_of(psy,
 			struct charger_manager, charger_psy);
 	struct charger_desc *desc = cm->desc;
-	int i, ret = 0, uV;
+	int ret = 0;
+	int uV;
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
@@ -428,8 +426,7 @@
 			val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		ret = get_batt_uV(cm, &i);
-		val->intval = i;
+		ret = get_batt_uV(cm, &val->intval);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 		ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
@@ -697,8 +694,10 @@
 	mutex_lock(&cm_list_mtx);
 	list_for_each_entry(cm, &cm_list, entry) {
 		if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
-		    cm->status_save_batt != is_batt_present(cm))
+		    cm->status_save_batt != is_batt_present(cm)) {
 			ret = false;
+			break;
+		}
 	}
 	mutex_unlock(&cm_list_mtx);
 
@@ -855,11 +854,10 @@
 
 	platform_set_drvdata(pdev, cm);
 
-	memcpy(&cm->charger_psy, &psy_default,
-				sizeof(psy_default));
+	memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default));
+
 	if (!desc->psy_name) {
-		strncpy(cm->psy_name_buf, psy_default.name,
-				PSY_NAME_MAX);
+		strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX);
 	} else {
 		strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
 	}
@@ -894,15 +892,15 @@
 				POWER_SUPPLY_PROP_CURRENT_NOW;
 		cm->charger_psy.num_properties++;
 	}
-	if (!desc->measure_battery_temp) {
-		cm->charger_psy.properties[cm->charger_psy.num_properties] =
-				POWER_SUPPLY_PROP_TEMP_AMBIENT;
-		cm->charger_psy.num_properties++;
-	}
+
 	if (desc->measure_battery_temp) {
 		cm->charger_psy.properties[cm->charger_psy.num_properties] =
 				POWER_SUPPLY_PROP_TEMP;
 		cm->charger_psy.num_properties++;
+	} else {
+		cm->charger_psy.properties[cm->charger_psy.num_properties] =
+				POWER_SUPPLY_PROP_TEMP_AMBIENT;
+		cm->charger_psy.num_properties++;
 	}
 
 	ret = power_supply_register(NULL, &cm->charger_psy);
@@ -933,9 +931,8 @@
 	return 0;
 
 err_chg_enable:
-	if (desc->charger_regulators)
-		regulator_bulk_free(desc->num_charger_regulators,
-					desc->charger_regulators);
+	regulator_bulk_free(desc->num_charger_regulators,
+			    desc->charger_regulators);
 err_bulk_get:
 	power_supply_unregister(&cm->charger_psy);
 err_register:
@@ -961,10 +958,8 @@
 	list_del(&cm->entry);
 	mutex_unlock(&cm_list_mtx);
 
-	if (desc->charger_regulators)
-		regulator_bulk_free(desc->num_charger_regulators,
-					desc->charger_regulators);
-
+	regulator_bulk_free(desc->num_charger_regulators,
+			    desc->charger_regulators);
 	power_supply_unregister(&cm->charger_psy);
 	kfree(cm->charger_psy.properties);
 	kfree(cm->charger_stat);
@@ -982,9 +977,7 @@
 
 static int cm_suspend_prepare(struct device *dev)
 {
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
-	struct charger_manager *cm = platform_get_drvdata(pdev);
+	struct charger_manager *cm = dev_get_drvdata(dev);
 
 	if (!cm_suspended) {
 		if (rtc_dev) {
@@ -1020,9 +1013,7 @@
 
 static void cm_suspend_complete(struct device *dev)
 {
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
-	struct charger_manager *cm = platform_get_drvdata(pdev);
+	struct charger_manager *cm = dev_get_drvdata(dev);
 
 	if (cm_suspended) {
 		if (rtc_dev) {
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index e8ea47a..a5f6a0e 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -612,6 +612,7 @@
 	 if (ret)
 		goto err;
 
+	platform_set_drvdata(pdev, bat);
 	return 0;
 
 err:
@@ -633,6 +634,7 @@
 		free_irq(bat->da9052->irq_base + irq, bat);
 	}
 	power_supply_unregister(&bat->psy);
+	kfree(bat);
 
 	return 0;
 }
@@ -645,18 +647,7 @@
 		.owner = THIS_MODULE,
 	},
 };
-
-static int __init da9052_bat_init(void)
-{
-	return platform_driver_register(&da9052_bat_driver);
-}
-module_init(da9052_bat_init);
-
-static void __exit da9052_bat_exit(void)
-{
-	platform_driver_unregister(&da9052_bat_driver);
-}
-module_exit(da9052_bat_exit);
+module_platform_driver(da9052_bat_driver);
 
 MODULE_DESCRIPTION("DA9052 BAT Device Driver");
 MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index bfbce5d..6bb6e2f 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -403,18 +403,7 @@
 	.remove		= ds278x_battery_remove,
 	.id_table	= ds278x_id,
 };
-
-static int __init ds278x_init(void)
-{
-	return i2c_add_driver(&ds278x_battery_driver);
-}
-module_init(ds278x_init);
-
-static void __exit ds278x_exit(void)
-{
-	i2c_del_driver(&ds278x_battery_driver);
-}
-module_exit(ds278x_exit);
+module_i2c_driver(ds278x_battery_driver);
 
 MODULE_AUTHOR("Ryan Mallon");
 MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 1289a5f..39eb50f 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -480,6 +480,7 @@
 
 	dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
 
+	isp1704_charger_set_power(isp, 0);
 	return ret;
 }
 
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index c53dd12..d8b7578 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -1,6 +1,7 @@
 /*
- * Driver for LP8727 Micro/Mini USB IC with intergrated charger
+ * Driver for LP8727 Micro/Mini USB IC with integrated charger
  *
+ *			Copyright (C) 2011 Texas Instruments
  *			Copyright (C) 2011 National Semiconductor
  *
  * This program is free software; you can redistribute it and/or modify
@@ -25,7 +26,7 @@
 #define INT1		0x4
 #define INT2		0x5
 #define STATUS1		0x6
-#define STATUS2 	0x7
+#define STATUS2		0x7
 #define CHGCTRL2	0x9
 
 /* CTRL1 register */
@@ -91,7 +92,7 @@
 	enum lp8727_dev_id devid;
 };
 
-static int lp8727_i2c_read(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+static int lp8727_read_bytes(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
 {
 	s32 ret;
 
@@ -102,29 +103,22 @@
 	return (ret != len) ? -EIO : 0;
 }
 
-static int lp8727_i2c_write(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+static inline int lp8727_read_byte(struct lp8727_chg *pchg, u8 reg, u8 *data)
 {
-	s32 ret;
+	return lp8727_read_bytes(pchg, reg, data, 1);
+}
+
+static int lp8727_write_byte(struct lp8727_chg *pchg, u8 reg, u8 data)
+{
+	int ret;
 
 	mutex_lock(&pchg->xfer_lock);
-	ret = i2c_smbus_write_i2c_block_data(pchg->client, reg, len, data);
+	ret = i2c_smbus_write_byte_data(pchg->client, reg, data);
 	mutex_unlock(&pchg->xfer_lock);
 
 	return ret;
 }
 
-static inline int lp8727_i2c_read_byte(struct lp8727_chg *pchg, u8 reg,
-				       u8 *data)
-{
-	return lp8727_i2c_read(pchg, reg, data, 1);
-}
-
-static inline int lp8727_i2c_write_byte(struct lp8727_chg *pchg, u8 reg,
-					u8 *data)
-{
-	return lp8727_i2c_write(pchg, reg, data, 1);
-}
-
 static int lp8727_is_charger_attached(const char *name, int id)
 {
 	if (name) {
@@ -137,37 +131,41 @@
 	return (id >= ID_TA && id <= ID_USB_CHG) ? 1 : 0;
 }
 
-static void lp8727_init_device(struct lp8727_chg *pchg)
+static int lp8727_init_device(struct lp8727_chg *pchg)
 {
 	u8 val;
+	int ret;
 
 	val = ID200_EN | ADC_EN | CP_EN;
-	if (lp8727_i2c_write_byte(pchg, CTRL1, &val))
-		dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL1);
+	ret = lp8727_write_byte(pchg, CTRL1, val);
+	if (ret)
+		return ret;
 
 	val = INT_EN | CHGDET_EN;
-	if (lp8727_i2c_write_byte(pchg, CTRL2, &val))
-		dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL2);
+	ret = lp8727_write_byte(pchg, CTRL2, val);
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
 static int lp8727_is_dedicated_charger(struct lp8727_chg *pchg)
 {
 	u8 val;
-	lp8727_i2c_read_byte(pchg, STATUS1, &val);
-	return (val & DCPORT);
+	lp8727_read_byte(pchg, STATUS1, &val);
+	return val & DCPORT;
 }
 
 static int lp8727_is_usb_charger(struct lp8727_chg *pchg)
 {
 	u8 val;
-	lp8727_i2c_read_byte(pchg, STATUS1, &val);
-	return (val & CHPORT);
+	lp8727_read_byte(pchg, STATUS1, &val);
+	return val & CHPORT;
 }
 
 static void lp8727_ctrl_switch(struct lp8727_chg *pchg, u8 sw)
 {
-	u8 val = sw;
-	lp8727_i2c_write_byte(pchg, SWCTRL, &val);
+	lp8727_write_byte(pchg, SWCTRL, sw);
 }
 
 static void lp8727_id_detection(struct lp8727_chg *pchg, u8 id, int vbusin)
@@ -207,9 +205,9 @@
 {
 	u8 val;
 
-	lp8727_i2c_read_byte(pchg, CTRL2, &val);
+	lp8727_read_byte(pchg, CTRL2, &val);
 	val |= CHGDET_EN;
-	lp8727_i2c_write_byte(pchg, CTRL2, &val);
+	lp8727_write_byte(pchg, CTRL2, val);
 }
 
 static void lp8727_delayed_func(struct work_struct *_work)
@@ -218,7 +216,7 @@
 	struct lp8727_chg *pchg =
 	    container_of(_work, struct lp8727_chg, work.work);
 
-	if (lp8727_i2c_read(pchg, INT1, intstat, 2)) {
+	if (lp8727_read_bytes(pchg, INT1, intstat, 2)) {
 		dev_err(pchg->dev, "can not read INT registers\n");
 		return;
 	}
@@ -244,20 +242,22 @@
 	return IRQ_HANDLED;
 }
 
-static void lp8727_intr_config(struct lp8727_chg *pchg)
+static int lp8727_intr_config(struct lp8727_chg *pchg)
 {
 	INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func);
 
 	pchg->irqthread = create_singlethread_workqueue("lp8727-irqthd");
-	if (!pchg->irqthread)
+	if (!pchg->irqthread) {
 		dev_err(pchg->dev, "can not create thread for lp8727\n");
-
-	if (request_threaded_irq(pchg->client->irq,
-				 NULL,
-				 lp8727_isr_func,
-				 IRQF_TRIGGER_FALLING, "lp8727_irq", pchg)) {
-		dev_err(pchg->dev, "lp8727 irq can not be registered\n");
+		return -ENOMEM;
 	}
+
+	return request_threaded_irq(pchg->client->irq,
+				NULL,
+				lp8727_isr_func,
+				IRQF_TRIGGER_FALLING,
+				"lp8727_irq",
+				pchg);
 }
 
 static enum power_supply_property lp8727_charger_prop[] = {
@@ -300,7 +300,7 @@
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
 		if (lp8727_is_charger_attached(psy->name, pchg->devid)) {
-			lp8727_i2c_read_byte(pchg, STATUS1, &read);
+			lp8727_read_byte(pchg, STATUS1, &read);
 			if (((read & CHGSTAT) >> 4) == EOC)
 				val->intval = POWER_SUPPLY_STATUS_FULL;
 			else
@@ -310,7 +310,7 @@
 		}
 		break;
 	case POWER_SUPPLY_PROP_HEALTH:
-		lp8727_i2c_read_byte(pchg, STATUS2, &read);
+		lp8727_read_byte(pchg, STATUS2, &read);
 		read = (read & TEMP_STAT) >> 5;
 		if (read >= 0x1 && read <= 0x3)
 			val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
@@ -351,7 +351,7 @@
 			eoc_level = pchg->chg_parm->eoc_level;
 			ichg = pchg->chg_parm->ichg;
 			val = (ichg << 4) | eoc_level;
-			lp8727_i2c_write_byte(pchg, CHGCTRL2, &val);
+			lp8727_write_byte(pchg, CHGCTRL2, val);
 		}
 	}
 }
@@ -439,15 +439,29 @@
 
 	mutex_init(&pchg->xfer_lock);
 
-	lp8727_init_device(pchg);
-	lp8727_intr_config(pchg);
+	ret = lp8727_init_device(pchg);
+	if (ret) {
+		dev_err(pchg->dev, "i2c communication err: %d", ret);
+		goto error;
+	}
+
+	ret = lp8727_intr_config(pchg);
+	if (ret) {
+		dev_err(pchg->dev, "irq handler err: %d", ret);
+		goto error;
+	}
 
 	ret = lp8727_register_psy(pchg);
-	if (ret)
-		dev_err(pchg->dev,
-			"can not register power supplies. err=%d", ret);
+	if (ret) {
+		dev_err(pchg->dev, "power supplies register err: %d", ret);
+		goto error;
+	}
 
 	return 0;
+
+error:
+	kfree(pchg);
+	return ret;
 }
 
 static int __devexit lp8727_remove(struct i2c_client *cl)
@@ -466,6 +480,7 @@
 	{"lp8727", 0},
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, lp8727_ids);
 
 static struct i2c_driver lp8727_driver = {
 	.driver = {
@@ -475,21 +490,9 @@
 	.remove = __devexit_p(lp8727_remove),
 	.id_table = lp8727_ids,
 };
+module_i2c_driver(lp8727_driver);
 
-static int __init lp8727_init(void)
-{
-	return i2c_add_driver(&lp8727_driver);
-}
-
-static void __exit lp8727_exit(void)
-{
-	i2c_del_driver(&lp8727_driver);
-}
-
-module_init(lp8727_init);
-module_exit(lp8727_exit);
-
-MODULE_DESCRIPTION("National Semiconductor LP8727 charger driver");
-MODULE_AUTHOR
-    ("Woogyom Kim <milo.kim@ti.com>, Daniel Jeong <daniel.jeong@ti.com>");
+MODULE_DESCRIPTION("TI/National Semiconductor LP8727 charger driver");
+MODULE_AUTHOR("Woogyom Kim <milo.kim@ti.com>, "
+	      "Daniel Jeong <daniel.jeong@ti.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index 2f2f9a6..c284143 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -290,18 +290,7 @@
 	.resume		= max17040_resume,
 	.id_table	= max17040_id,
 };
-
-static int __init max17040_init(void)
-{
-	return i2c_add_driver(&max17040_i2c_driver);
-}
-module_init(max17040_init);
-
-static void __exit max17040_exit(void)
-{
-	i2c_del_driver(&max17040_i2c_driver);
-}
-module_exit(max17040_exit);
+module_i2c_driver(max17040_i2c_driver);
 
 MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
 MODULE_DESCRIPTION("MAX17040 Fuel Gauge");
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 86acee2..04620c2 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -26,14 +26,47 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/mod_devicetable.h>
 #include <linux/power_supply.h>
 #include <linux/power/max17042_battery.h>
+#include <linux/of.h>
+
+/* Status register bits */
+#define STATUS_POR_BIT         (1 << 1)
+#define STATUS_BST_BIT         (1 << 3)
+#define STATUS_VMN_BIT         (1 << 8)
+#define STATUS_TMN_BIT         (1 << 9)
+#define STATUS_SMN_BIT         (1 << 10)
+#define STATUS_BI_BIT          (1 << 11)
+#define STATUS_VMX_BIT         (1 << 12)
+#define STATUS_TMX_BIT         (1 << 13)
+#define STATUS_SMX_BIT         (1 << 14)
+#define STATUS_BR_BIT          (1 << 15)
+
+/* Interrupt mask bits */
+#define CONFIG_ALRT_BIT_ENBL	(1 << 2)
+#define STATUS_INTR_SOCMIN_BIT	(1 << 10)
+#define STATUS_INTR_SOCMAX_BIT	(1 << 14)
+
+#define VFSOC0_LOCK		0x0000
+#define VFSOC0_UNLOCK		0x0080
+#define MODEL_UNLOCK1	0X0059
+#define MODEL_UNLOCK2	0X00C4
+#define MODEL_LOCK1		0X0000
+#define MODEL_LOCK2		0X0000
+
+#define dQ_ACC_DIV	0x4
+#define dP_ACC_100	0x1900
+#define dP_ACC_200	0x3200
 
 struct max17042_chip {
 	struct i2c_client *client;
 	struct power_supply battery;
 	struct max17042_platform_data *pdata;
+	struct work_struct work;
+	int    init_complete;
 };
 
 static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
@@ -87,6 +120,9 @@
 				struct max17042_chip, battery);
 	int ret;
 
+	if (!chip->init_complete)
+		return -EAGAIN;
+
 	switch (psp) {
 	case POWER_SUPPLY_PROP_PRESENT:
 		ret = max17042_read_reg(chip->client, MAX17042_STATUS);
@@ -136,21 +172,18 @@
 		val->intval = ret * 625 / 8;
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY:
-		ret = max17042_read_reg(chip->client, MAX17042_SOC);
+		ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
 		if (ret < 0)
 			return ret;
 
 		val->intval = ret >> 8;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_FULL:
-		ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+		ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
 		if (ret < 0)
 			return ret;
 
-		if ((ret >> 8) >= MAX17042_BATTERY_FULL)
-			val->intval = 1;
-		else if (ret >= 0)
-			val->intval = 0;
+		val->intval = ret * 1000 / 2;
 		break;
 	case POWER_SUPPLY_PROP_TEMP:
 		ret = max17042_read_reg(chip->client, MAX17042_TEMP);
@@ -210,22 +243,419 @@
 	return 0;
 }
 
+static int max17042_write_verify_reg(struct i2c_client *client,
+				u8 reg, u16 value)
+{
+	int retries = 8;
+	int ret;
+	u16 read_value;
+
+	do {
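+	/*
+	 * Write the register and read it back, retrying up to 8 times
+	 * until the readback matches the written value
+	 */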
+		ret = i2c_smbus_write_word_data(client, reg, value);
+		read_value =  max17042_read_reg(client, reg);
+		if (read_value != value) {
+			ret = -EIO;
+			retries--;
+		}
+	} while (retries && read_value != value);
+
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static inline void max17042_override_por(
+	struct i2c_client *client, u8 reg, u16 value)
+{
+	if (value)
+		max17042_write_reg(client, reg, value);
+}
+
+static inline void max10742_unlock_model(struct max17042_chip *chip)
+{
+	struct i2c_client *client = chip->client;
+	max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
+	max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
+}
+
+static inline void max10742_lock_model(struct max17042_chip *chip)
+{
+	struct i2c_client *client = chip->client;
+	max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1);
+	max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2);
+}
+
+static inline void max17042_write_model_data(struct max17042_chip *chip,
+					u8 addr, int size)
+{
+	struct i2c_client *client = chip->client;
+	int i;
+	for (i = 0; i < size; i++)
+		max17042_write_reg(client, addr + i,
+				chip->pdata->config_data->cell_char_tbl[i]);
+}
+
+static inline void max17042_read_model_data(struct max17042_chip *chip,
+					u8 addr, u16 *data, int size)
+{
+	struct i2c_client *client = chip->client;
+	int i;
+
+	for (i = 0; i < size; i++)
+		data[i] = max17042_read_reg(client, addr + i);
+}
+
+static inline int max17042_model_data_compare(struct max17042_chip *chip,
+					u16 *data1, u16 *data2, int size)
+{
+	int i;
+
+	if (memcmp(data1, data2, size * sizeof(u16))) {
+		dev_err(&chip->client->dev, "%s compare failed\n", __func__);
+		for (i = 0; i < size; i++)
+			dev_info(&chip->client->dev, "0x%x, 0x%x",
+				data1[i], data2[i]);
+		dev_info(&chip->client->dev, "\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int max17042_init_model(struct max17042_chip *chip)
+{
+	int ret;
+	int table_size =
+		sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+	u16 *temp_data;
+
+	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
+	if (!temp_data)
+		return -ENOMEM;
+
+	max10742_unlock_model(chip);
+	max17042_write_model_data(chip, MAX17042_MODELChrTbl,
+				table_size);
+	max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
+				table_size);
+
+	ret = max17042_model_data_compare(
+		chip,
+		chip->pdata->config_data->cell_char_tbl,
+		temp_data,
+		table_size);
+
+	max10742_lock_model(chip);
+	kfree(temp_data);
+
+	return ret;
+}
+
+static int max17042_verify_model_lock(struct max17042_chip *chip)
+{
+	int i;
+	int table_size =
+		sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+	u16 *temp_data;
+	int ret = 0;
+
+	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
+	if (!temp_data)
+		return -ENOMEM;
+
+	max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
+				table_size);
+	for (i = 0; i < table_size; i++)
+		if (temp_data[i])
+			ret = -EINVAL;
+
+	kfree(temp_data);
+	return ret;
+}
+
+static void max17042_write_config_regs(struct max17042_chip *chip)
+{
+	struct max17042_config_data *config = chip->pdata->config_data;
+
+	max17042_write_reg(chip->client, MAX17042_CONFIG, config->config);
+	max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg);
+	max17042_write_reg(chip->client, MAX17042_FilterCFG,
+			config->filter_cfg);
+	max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+}
+
+static void  max17042_write_custom_regs(struct max17042_chip *chip)
+{
+	struct max17042_config_data *config = chip->pdata->config_data;
+
+	max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
+				config->rcomp0);
+	max17042_write_verify_reg(chip->client, MAX17042_TempCo,
+				config->tcompc0);
+	max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+			config->empty_tempco);
+	max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+				config->kempty0);
+	max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
+				config->ichgt_term);
+}
+
+static void max17042_update_capacity_regs(struct max17042_chip *chip)
+{
+	struct max17042_config_data *config = chip->pdata->config_data;
+
+	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+				config->fullcap);
+	max17042_write_reg(chip->client, MAX17042_DesignCap,
+			config->design_cap);
+	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+				config->fullcapnom);
+}
+
+static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip)
+{
+	u16 vfSoc;
+
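+	/* copy the current VFSOC reading into VFSOC0 while it is unlocked */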
+	vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
+	max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc);
+	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
+}
+
+static void max17042_load_new_capacity_params(struct max17042_chip *chip)
+{
+	u16 full_cap0, rep_cap, dq_acc, vfSoc;
+	u32 rem_cap;
+
+	struct max17042_config_data *config = chip->pdata->config_data;
+
+	full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0);
+	vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+
+	/* vfSoc needs to be shifted by 8 bits to get the
+	 * percentage with 1% accuracy; to get the right rem_cap, multiply
+	 * full_cap0 by vfSoc and divide by 100
+	 */
+	rem_cap = ((vfSoc >> 8) * full_cap0) / 100;
+	max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap);
+
+	rep_cap = (u16)rem_cap;
+	max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap);
+
+	/* Write dQ_acc to 200% of Capacity and dP_acc to 200% */
+	dq_acc = config->fullcap / dQ_ACC_DIV;
+	max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
+	max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);
+
+	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+			config->fullcap);
+	max17042_write_reg(chip->client, MAX17042_DesignCap,
+			config->design_cap);
+	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+			config->fullcapnom);
+}
+
+/*
+ * Block write all the override values coming from platform data.
+ * This function MUST be called before the POR initialization procedure
+ * specified by Maxim.
+ */
+static inline void max17042_override_por_values(struct max17042_chip *chip)
+{
+	struct i2c_client *client = chip->client;
+	struct max17042_config_data *config = chip->pdata->config_data;
+
+	max17042_override_por(client, MAX17042_TGAIN, config->tgain);
+	max17042_override_por(client, MAx17042_TOFF, config->toff);
+	max17042_override_por(client, MAX17042_CGAIN, config->cgain);
+	max17042_override_por(client, MAX17042_COFF, config->coff);
+
+	max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh);
+	max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh);
+	max17042_override_por(client, MAX17042_SALRT_Th,
+			config->soc_alrt_thresh);
+	max17042_override_por(client, MAX17042_CONFIG, config->config);
+	max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer);
+
+	max17042_override_por(client, MAX17042_DesignCap, config->design_cap);
+	max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term);
+
+	max17042_override_por(client, MAX17042_AtRate, config->at_rate);
+	max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg);
+	max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg);
+	max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg);
+	max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg);
+	max17042_override_por(client, MAX17042_MaskSOC, config->masksoc);
+
+	max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
+	max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
+	max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+	max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
+	max17042_override_por(client, MAX17042_dQacc, config->dqacc);
+	max17042_override_por(client, MAX17042_dPacc, config->dpacc);
+
+	max17042_override_por(client, MAX17042_V_empty, config->vempty);
+	max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
+	max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
+	max17042_override_por(client, MAX17042_FCTC, config->fctc);
+	max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
+	max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
+	max17042_override_por(client, MAX17042_EmptyTempCo,
+			config->empty_tempco);
+	max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+}
+
+static int max17042_init_chip(struct max17042_chip *chip)
+{
+	int ret;
+	int val;
+
+	max17042_override_por_values(chip);
+	/* After power-up, the MAX17042 requires 500 ms in order
+	 * to perform signal debouncing and initial SOC reporting
+	 */
+	msleep(500);
+
+	/* Initialize configuration */
+	max17042_write_config_regs(chip);
+
+	/* write cell characterization data */
+	ret = max17042_init_model(chip);
+	if (ret) {
+		dev_err(&chip->client->dev, "%s init failed\n",
+			__func__);
+		return -EIO;
+	}
+	ret = max17042_verify_model_lock(chip);
+	if (ret) {
+		dev_err(&chip->client->dev, "%s lock verify failed\n",
+			__func__);
+		return -EIO;
+	}
+	/* write custom parameters */
+	max17042_write_custom_regs(chip);
+
+	/* update capacity params */
+	max17042_update_capacity_regs(chip);
+
+	/* delay must be at least 350 ms to allow VFSOC
+	 * to be calculated from the new configuration
+	 */
+	msleep(350);
+
+	/* reset vfsoc0 reg */
+	max17042_reset_vfsoc0_reg(chip);
+
+	/* load new capacity params */
+	max17042_load_new_capacity_params(chip);
+
+	/* Init complete, Clear the POR bit */
+	val = max17042_read_reg(chip->client, MAX17042_STATUS);
+	max17042_write_reg(chip->client, MAX17042_STATUS,
+			val & (~STATUS_POR_BIT));
+	return 0;
+}
+
+static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
+{
+	u16 soc, soc_tr;
+
+	/* program interrupt thresholds such that we should
+	 * get an interrupt for every 'off' percent change in the SOC
+	 */
+	soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8;
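+	/* max threshold in the high byte, min threshold in the low byte */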
+	soc_tr = (soc + off) << 8;
+	soc_tr |= (soc - off);
+	max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
+}
+
+static irqreturn_t max17042_thread_handler(int id, void *dev)
+{
+	struct max17042_chip *chip = dev;
+	u16 val;
+
+	val = max17042_read_reg(chip->client, MAX17042_STATUS);
+	if ((val & STATUS_INTR_SOCMIN_BIT) ||
+		(val & STATUS_INTR_SOCMAX_BIT)) {
+		dev_info(&chip->client->dev, "SOC threshold INTR\n");
+		max17042_set_soc_threshold(chip, 1);
+	}
+
+	power_supply_changed(&chip->battery);
+	return IRQ_HANDLED;
+}
+
+static void max17042_init_worker(struct work_struct *work)
+{
+	struct max17042_chip *chip = container_of(work,
+				struct max17042_chip, work);
+	int ret;
+
+	/* Initialize registers according to values from the platform data */
+	if (chip->pdata->enable_por_init && chip->pdata->config_data) {
+		ret = max17042_init_chip(chip);
+		if (ret)
+			return;
+	}
+
+	chip->init_complete = 1;
+}
+
+#ifdef CONFIG_OF
+static struct max17042_platform_data *
+max17042_get_pdata(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	u32 prop;
+	struct max17042_platform_data *pdata;
+
+	if (!np)
+		return dev->platform_data;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	/*
+	 * Require current sense resistor value to be specified for
+	 * current-sense functionality to be enabled at all.
+	 */
+	if (of_property_read_u32(np, "maxim,rsns-microohm", &prop) == 0) {
+		pdata->r_sns = prop;
+		pdata->enable_current_sense = true;
+	}
+
+	return pdata;
+}
+#else
+static struct max17042_platform_data *
+max17042_get_pdata(struct device *dev)
+{
+	return dev->platform_data;
+}
+#endif
+
 static int __devinit max17042_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 	struct max17042_chip *chip;
 	int ret;
+	int reg;
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
 		return -EIO;
 
-	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
 		return -ENOMEM;
 
 	chip->client = client;
-	chip->pdata = client->dev.platform_data;
+	chip->pdata = max17042_get_pdata(&client->dev);
+	if (!chip->pdata) {
+		dev_err(&client->dev, "no platform data provided\n");
+		return -EINVAL;
+	}
 
 	i2c_set_clientdata(client, chip);
 
@@ -243,17 +673,9 @@
 	if (chip->pdata->r_sns == 0)
 		chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
 
-	ret = power_supply_register(&client->dev, &chip->battery);
-	if (ret) {
-		dev_err(&client->dev, "failed: power supply register\n");
-		kfree(chip);
-		return ret;
-	}
-
-	/* Initialize registers according to values from the platform data */
 	if (chip->pdata->init_data)
 		max17042_set_reg(client, chip->pdata->init_data,
-				 chip->pdata->num_init_data);
+				chip->pdata->num_init_data);
 
 	if (!chip->pdata->enable_current_sense) {
 		max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
@@ -261,7 +683,34 @@
 		max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
 	}
 
-	return 0;
+	if (client->irq) {
+		ret = request_threaded_irq(client->irq, NULL,
+						max17042_thread_handler,
+						IRQF_TRIGGER_FALLING,
+						chip->battery.name, chip);
+		if (!ret) {
+			reg = max17042_read_reg(client, MAX17042_CONFIG);
+			reg |= CONFIG_ALRT_BIT_ENBL;
+			max17042_write_reg(client, MAX17042_CONFIG, reg);
+			max17042_set_soc_threshold(chip, 1);
+		} else {
+			dev_err(&client->dev, "%s(): cannot get IRQ\n",
+				__func__);
+		}
+	}
+
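+	/* if the chip signals a power-on reset, (re)initialize it from a
+	 * worker; otherwise the register contents are still valid
+	 */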
+	reg = max17042_read_reg(chip->client, MAX17042_STATUS);
+
+	if (reg & STATUS_POR_BIT) {
+		INIT_WORK(&chip->work, max17042_init_worker);
+		schedule_work(&chip->work);
+	} else {
+		chip->init_complete = 1;
+	}
+
+	ret = power_supply_register(&client->dev, &chip->battery);
+	if (ret)
+		dev_err(&client->dev, "failed: power supply register\n");
+	return ret;
 }
 
 static int __devexit max17042_remove(struct i2c_client *client)
@@ -269,10 +718,17 @@
 	struct max17042_chip *chip = i2c_get_clientdata(client);
 
 	power_supply_unregister(&chip->battery);
-	kfree(chip);
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id max17042_dt_match[] = {
+	{ .compatible = "maxim,max17042" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, max17042_dt_match);
+#endif
+
 static const struct i2c_device_id max17042_id[] = {
 	{ "max17042", 0 },
 	{ }
@@ -282,23 +738,13 @@
 static struct i2c_driver max17042_i2c_driver = {
 	.driver	= {
 		.name	= "max17042",
+		.of_match_table = of_match_ptr(max17042_dt_match),
 	},
 	.probe		= max17042_probe,
 	.remove		= __devexit_p(max17042_remove),
 	.id_table	= max17042_id,
 };
-
-static int __init max17042_init(void)
-{
-	return i2c_add_driver(&max17042_i2c_driver);
-}
-module_init(max17042_init);
-
-static void __exit max17042_exit(void)
-{
-	i2c_del_driver(&max17042_i2c_driver);
-}
-module_exit(max17042_exit);
+module_i2c_driver(max17042_i2c_driver);
 
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 9ff8af0..06b659d 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -852,18 +852,7 @@
 		.of_match_table = sbs_dt_ids,
 	},
 };
-
-static int __init sbs_battery_init(void)
-{
-	return i2c_add_driver(&sbs_battery_driver);
-}
-module_init(sbs_battery_init);
-
-static void __exit sbs_battery_exit(void)
-{
-	i2c_del_driver(&sbs_battery_driver);
-}
-module_exit(sbs_battery_exit);
+module_i2c_driver(sbs_battery_driver);
 
 MODULE_DESCRIPTION("SBS battery monitor driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
new file mode 100644
index 0000000..ce1694d
--- /dev/null
+++ b/drivers/power/smb347-charger.c
@@ -0,0 +1,1294 @@
+/*
+ * Summit Microelectronics SMB347 Battery Charger Driver
+ *
+ * Copyright (C) 2011, Intel Corporation
+ *
+ * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/power/smb347-charger.h>
+#include <linux/seq_file.h>
+
+/*
+ * Configuration registers. These are mirrored to volatile RAM and can be
+ * written once %CMD_A_ALLOW_WRITE is set in %CMD_A register. They will be
+ * reloaded from non-volatile registers after POR.
+ */
+#define CFG_CHARGE_CURRENT			0x00
+#define CFG_CHARGE_CURRENT_FCC_MASK		0xe0
+#define CFG_CHARGE_CURRENT_FCC_SHIFT		5
+#define CFG_CHARGE_CURRENT_PCC_MASK		0x18
+#define CFG_CHARGE_CURRENT_PCC_SHIFT		3
+#define CFG_CHARGE_CURRENT_TC_MASK		0x07
+#define CFG_CURRENT_LIMIT			0x01
+#define CFG_CURRENT_LIMIT_DC_MASK		0xf0
+#define CFG_CURRENT_LIMIT_DC_SHIFT		4
+#define CFG_CURRENT_LIMIT_USB_MASK		0x0f
+#define CFG_FLOAT_VOLTAGE			0x03
+#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK	0xc0
+#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT	6
+#define CFG_STAT				0x05
+#define CFG_STAT_DISABLED			BIT(5)
+#define CFG_STAT_ACTIVE_HIGH			BIT(7)
+#define CFG_PIN					0x06
+#define CFG_PIN_EN_CTRL_MASK			0x60
+#define CFG_PIN_EN_CTRL_ACTIVE_HIGH		0x40
+#define CFG_PIN_EN_CTRL_ACTIVE_LOW		0x60
+#define CFG_PIN_EN_APSD_IRQ			BIT(1)
+#define CFG_PIN_EN_CHARGER_ERROR		BIT(2)
+#define CFG_THERM				0x07
+#define CFG_THERM_SOFT_HOT_COMPENSATION_MASK	0x03
+#define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT	0
+#define CFG_THERM_SOFT_COLD_COMPENSATION_MASK	0x0c
+#define CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT	2
+#define CFG_THERM_MONITOR_DISABLED		BIT(4)
+#define CFG_SYSOK				0x08
+#define CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED	BIT(2)
+#define CFG_OTHER				0x09
+#define CFG_OTHER_RID_MASK			0xc0
+#define CFG_OTHER_RID_ENABLED_AUTO_OTG		0xc0
+#define CFG_OTG					0x0a
+#define CFG_OTG_TEMP_THRESHOLD_MASK		0x30
+#define CFG_OTG_TEMP_THRESHOLD_SHIFT		4
+#define CFG_OTG_CC_COMPENSATION_MASK		0xc0
+#define CFG_OTG_CC_COMPENSATION_SHIFT		6
+#define CFG_TEMP_LIMIT				0x0b
+#define CFG_TEMP_LIMIT_SOFT_HOT_MASK		0x03
+#define CFG_TEMP_LIMIT_SOFT_HOT_SHIFT		0
+#define CFG_TEMP_LIMIT_SOFT_COLD_MASK		0x0c
+#define CFG_TEMP_LIMIT_SOFT_COLD_SHIFT		2
+#define CFG_TEMP_LIMIT_HARD_HOT_MASK		0x30
+#define CFG_TEMP_LIMIT_HARD_HOT_SHIFT		4
+#define CFG_TEMP_LIMIT_HARD_COLD_MASK		0xc0
+#define CFG_TEMP_LIMIT_HARD_COLD_SHIFT		6
+#define CFG_FAULT_IRQ				0x0c
+#define CFG_FAULT_IRQ_DCIN_UV			BIT(2)
+#define CFG_STATUS_IRQ				0x0d
+#define CFG_STATUS_IRQ_TERMINATION_OR_TAPER	BIT(4)
+#define CFG_ADDRESS				0x0e
+
+/* Command registers */
+#define CMD_A					0x30
+#define CMD_A_CHG_ENABLED			BIT(1)
+#define CMD_A_SUSPEND_ENABLED			BIT(2)
+#define CMD_A_ALLOW_WRITE			BIT(7)
+#define CMD_B					0x31
+#define CMD_C					0x33
+
+/* Interrupt Status registers */
+#define IRQSTAT_A				0x35
+#define IRQSTAT_C				0x37
+#define IRQSTAT_C_TERMINATION_STAT		BIT(0)
+#define IRQSTAT_C_TERMINATION_IRQ		BIT(1)
+#define IRQSTAT_C_TAPER_IRQ			BIT(3)
+#define IRQSTAT_E				0x39
+#define IRQSTAT_E_USBIN_UV_STAT			BIT(0)
+#define IRQSTAT_E_USBIN_UV_IRQ			BIT(1)
+#define IRQSTAT_E_DCIN_UV_STAT			BIT(4)
+#define IRQSTAT_E_DCIN_UV_IRQ			BIT(5)
+#define IRQSTAT_F				0x3a
+
+/* Status registers */
+#define STAT_A					0x3b
+#define STAT_A_FLOAT_VOLTAGE_MASK		0x3f
+#define STAT_B					0x3c
+#define STAT_C					0x3d
+#define STAT_C_CHG_ENABLED			BIT(0)
+#define STAT_C_CHG_MASK				0x06
+#define STAT_C_CHG_SHIFT			1
+#define STAT_C_CHARGER_ERROR			BIT(6)
+#define STAT_E					0x3f
+
+/**
+ * struct smb347_charger - smb347 charger instance
+ * @lock: protects concurrent access to online variables
+ * @client: pointer to i2c client
+ * @mains: power_supply instance for AC/DC power
+ * @usb: power_supply instance for USB power
+ * @battery: power_supply instance for battery
+ * @mains_online: is AC/DC input connected
+ * @usb_online: is USB input connected
+ * @charging_enabled: is charging enabled
+ * @dentry: for debugfs
+ * @pdata: pointer to platform data
+ */
+struct smb347_charger {
+	struct mutex		lock;
+	struct i2c_client	*client;
+	struct power_supply	mains;
+	struct power_supply	usb;
+	struct power_supply	battery;
+	bool			mains_online;
+	bool			usb_online;
+	bool			charging_enabled;
+	struct dentry		*dentry;
+	const struct smb347_charger_platform_data *pdata;
+};
+
+/* Fast charge current in uA */
+static const unsigned int fcc_tbl[] = {
+	700000,
+	900000,
+	1200000,
+	1500000,
+	1800000,
+	2000000,
+	2200000,
+	2500000,
+};
+
+/* Pre-charge current in uA */
+static const unsigned int pcc_tbl[] = {
+	100000,
+	150000,
+	200000,
+	250000,
+};
+
+/* Termination current in uA */
+static const unsigned int tc_tbl[] = {
+	37500,
+	50000,
+	100000,
+	150000,
+	200000,
+	250000,
+	500000,
+	600000,
+};
+
+/* Input current limit in uA */
+static const unsigned int icl_tbl[] = {
+	300000,
+	500000,
+	700000,
+	900000,
+	1200000,
+	1500000,
+	1800000,
+	2000000,
+	2200000,
+	2500000,
+};
+
+/* Charge current compensation in uA */
+static const unsigned int ccc_tbl[] = {
+	250000,
+	700000,
+	900000,
+	1200000,
+};
+
+/* Convert register value to current using lookup table */
+static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
+{
+	if (val >= size)
+		return -EINVAL;
+	return tbl[val];
+}
+
+/* Convert current to register value using lookup table */
+static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
+{
+	size_t i;
+
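+	/* return the index of the largest table entry that does not
+	 * exceed the requested current
+	 */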
+	for (i = 0; i < size; i++)
+		if (val < tbl[i])
+			break;
+	return i > 0 ? i - 1 : -EINVAL;
+}
+
+static int smb347_read(struct smb347_charger *smb, u8 reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(smb->client, reg);
+	if (ret < 0)
+		dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
+			 reg, ret);
+	return ret;
+}
+
+static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(smb->client, reg, val);
+	if (ret < 0)
+		dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
+			 reg, ret);
+	return ret;
+}
+
+/**
+ * smb347_update_status - updates the charging status
+ * @smb: pointer to smb347 charger instance
+ *
+ * Function checks status of the charging and updates internal state
+ * accordingly. Returns %0 if there is no change in status, %1 if the
+ * status has changed and negative errno in case of failure.
+ */
+static int smb347_update_status(struct smb347_charger *smb)
+{
+	bool usb = false;
+	bool dc = false;
+	int ret;
+
+	ret = smb347_read(smb, IRQSTAT_E);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * dc and usb are set depending on whether they are enabled in the
+	 * platform data _and_ whether the corresponding undervoltage status
+	 * bit is clear.
+	 */
+	if (smb->pdata->use_mains)
+		dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+	if (smb->pdata->use_usb)
+		usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+
+	mutex_lock(&smb->lock);
+	ret = smb->mains_online != dc || smb->usb_online != usb;
+	smb->mains_online = dc;
+	smb->usb_online = usb;
+	mutex_unlock(&smb->lock);
+
+	return ret;
+}
+
+/*
+ * smb347_is_online - returns whether input power source is connected
+ * @smb: pointer to smb347 charger instance
+ *
+ * Returns %true if input power source is connected. Note that this is
+ * dependent on what the platform has configured as usable power sources.
+ * For example, if USB is disabled, this will return %false even if the USB
+ * cable is connected.
+ */
+static bool smb347_is_online(struct smb347_charger *smb)
+{
+	bool ret;
+
+	mutex_lock(&smb->lock);
+	ret = smb->usb_online || smb->mains_online;
+	mutex_unlock(&smb->lock);
+
+	return ret;
+}
+
+/**
+ * smb347_charging_status - returns status of charging
+ * @smb: pointer to smb347 charger instance
+ *
+ * Function returns charging status. %0 means no charging is in progress,
+ * %1 means pre-charging, %2 fast-charging and %3 taper-charging.
+ */
+static int smb347_charging_status(struct smb347_charger *smb)
+{
+	int ret;
+
+	if (!smb347_is_online(smb))
+		return 0;
+
+	ret = smb347_read(smb, STAT_C);
+	if (ret < 0)
+		return 0;
+
+	return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+}
+
+static int smb347_charging_set(struct smb347_charger *smb, bool enable)
+{
+	int ret = 0;
+
+	if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
+		dev_dbg(&smb->client->dev,
+			"charging enable/disable in SW disabled\n");
+		return 0;
+	}
+
+	mutex_lock(&smb->lock);
+	if (smb->charging_enabled != enable) {
+		ret = smb347_read(smb, CMD_A);
+		if (ret < 0)
+			goto out;
+
+		smb->charging_enabled = enable;
+
+		if (enable)
+			ret |= CMD_A_CHG_ENABLED;
+		else
+			ret &= ~CMD_A_CHG_ENABLED;
+
+		ret = smb347_write(smb, CMD_A, ret);
+	}
+out:
+	mutex_unlock(&smb->lock);
+	return ret;
+}
+
+static inline int smb347_charging_enable(struct smb347_charger *smb)
+{
+	return smb347_charging_set(smb, true);
+}
+
+static inline int smb347_charging_disable(struct smb347_charger *smb)
+{
+	return smb347_charging_set(smb, false);
+}
+
+static int smb347_update_online(struct smb347_charger *smb)
+{
+	int ret;
+
+	/*
+	 * Depending on whether a valid power source is connected or not, we
+	 * enable or disable the charging. We do it manually because it
+	 * depends on how the platform has configured the valid inputs.
+	 */
+	if (smb347_is_online(smb)) {
+		ret = smb347_charging_enable(smb);
+		if (ret < 0)
+			dev_err(&smb->client->dev,
+				"failed to enable charging\n");
+	} else {
+		ret = smb347_charging_disable(smb);
+		if (ret < 0)
+			dev_err(&smb->client->dev,
+				"failed to disable charging\n");
+	}
+
+	return ret;
+}
+
+static int smb347_set_charge_current(struct smb347_charger *smb)
+{
+	int ret, val;
+
+	ret = smb347_read(smb, CFG_CHARGE_CURRENT);
+	if (ret < 0)
+		return ret;
+
+	if (smb->pdata->max_charge_current) {
+		val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+				    smb->pdata->max_charge_current);
+		if (val < 0)
+			return val;
+
+		ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
+		ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+	}
+
+	if (smb->pdata->pre_charge_current) {
+		val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+				    smb->pdata->pre_charge_current);
+		if (val < 0)
+			return val;
+
+		ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
+		ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+	}
+
+	if (smb->pdata->termination_current) {
+		val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+				    smb->pdata->termination_current);
+		if (val < 0)
+			return val;
+
+		ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
+		ret |= val;
+	}
+
+	return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+}
+
+static int smb347_set_current_limits(struct smb347_charger *smb)
+{
+	int ret, val;
+
+	ret = smb347_read(smb, CFG_CURRENT_LIMIT);
+	if (ret < 0)
+		return ret;
+
+	if (smb->pdata->mains_current_limit) {
+		val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+				    smb->pdata->mains_current_limit);
+		if (val < 0)
+			return val;
+
+		ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
+		ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+	}
+
+	if (smb->pdata->usb_hc_current_limit) {
+		val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+				    smb->pdata->usb_hc_current_limit);
+		if (val < 0)
+			return val;
+
+		ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
+		ret |= val;
+	}
+
+	return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+}
+
+static int smb347_set_voltage_limits(struct smb347_charger *smb)
+{
+	int ret, val;
+
+	ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
+	if (ret < 0)
+		return ret;
+
+	if (smb->pdata->pre_to_fast_voltage) {
+		val = smb->pdata->pre_to_fast_voltage;
+
+		/* uV: 2.4 V to 3.0 V in 200 mV steps */
+		val = clamp_val(val, 2400000, 3000000) - 2400000;
+		val /= 200000;
+
+		ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
+		ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+	}
+
+	if (smb->pdata->max_charge_voltage) {
+		val = smb->pdata->max_charge_voltage;
+
+		/* uV: 3.5 V to 4.5 V in 20 mV steps */
+		val = clamp_val(val, 3500000, 4500000) - 3500000;
+		val /= 20000;
+
+		ret |= val;
+	}
+
+	return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+}
+
+static int smb347_set_temp_limits(struct smb347_charger *smb)
+{
+	bool enable_therm_monitor = false;
+	int ret, val;
+
+	if (smb->pdata->chip_temp_threshold) {
+		val = smb->pdata->chip_temp_threshold;
+
+		/* degree C */
+		val = clamp_val(val, 100, 130) - 100;
+		val /= 10;
+
+		ret = smb347_read(smb, CFG_OTG);
+		if (ret < 0)
+			return ret;
+
+		ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
+		ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
+
+		ret = smb347_write(smb, CFG_OTG, ret);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = smb347_read(smb, CFG_TEMP_LIMIT);
+	if (ret < 0)
+		return ret;
+
+	if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+		val = smb->pdata->soft_cold_temp_limit;
+
+		val = clamp_val(val, 0, 15);
+		val /= 5;
+		/* this goes from higher to lower so invert the value */
+		val = ~val & 0x3;
+
+		ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
+		ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+
+		enable_therm_monitor = true;
+	}
+
+	if (smb->pdata->soft_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+		val = smb->pdata->soft_hot_temp_limit;
+
+		val = clamp_val(val, 40, 55) - 40;
+		val /= 5;
+
+		ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
+		ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+
+		enable_therm_monitor = true;
+	}
+
+	if (smb->pdata->hard_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+		val = smb->pdata->hard_cold_temp_limit;
+
+		val = clamp_val(val, -5, 10) + 5;
+		val /= 5;
+		/* this goes from higher to lower so invert the value */
+		val = ~val & 0x3;
+
+		ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
+		ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+
+		enable_therm_monitor = true;
+	}
+
+	if (smb->pdata->hard_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
+		val = smb->pdata->hard_hot_temp_limit;
+
+		val = clamp_val(val, 50, 65) - 50;
+		val /= 5;
+
+		ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
+		ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+
+		enable_therm_monitor = true;
+	}
+
+	ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * If any of the temperature limits are set, we also enable the
+	 * thermistor monitoring.
+	 *
+	 * When soft limits are hit, the device will start to compensate
+	 * current and/or voltage depending on the configuration.
+	 *
+	 * When hard limit is hit, the device will suspend charging
+	 * depending on the configuration.
+	 */
+	if (enable_therm_monitor) {
+		ret = smb347_read(smb, CFG_THERM);
+		if (ret < 0)
+			return ret;
+
+		ret &= ~CFG_THERM_MONITOR_DISABLED;
+
+		ret = smb347_write(smb, CFG_THERM, ret);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (smb->pdata->suspend_on_hard_temp_limit) {
+		ret = smb347_read(smb, CFG_SYSOK);
+		if (ret < 0)
+			return ret;
+
+		ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
+
+		ret = smb347_write(smb, CFG_SYSOK, ret);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (smb->pdata->soft_temp_limit_compensation !=
+	    SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
+		val = smb->pdata->soft_temp_limit_compensation & 0x3;
+
+		ret = smb347_read(smb, CFG_THERM);
+		if (ret < 0)
+			return ret;
+
+		ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
+		ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
+
+		ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
+		ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
+
+		ret = smb347_write(smb, CFG_THERM, ret);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (smb->pdata->charge_current_compensation) {
+		val = current_to_hw(ccc_tbl, ARRAY_SIZE(ccc_tbl),
+				    smb->pdata->charge_current_compensation);
+		if (val < 0)
+			return val;
+
+		ret = smb347_read(smb, CFG_OTG);
+		if (ret < 0)
+			return ret;
+
+		ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
+		ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
+
+		ret = smb347_write(smb, CFG_OTG, ret);
+		if (ret < 0)
+			return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * smb347_set_writable - enables/disables writing to non-volatile registers
+ * @smb: pointer to smb347 charger instance
+ *
+ * You can enable/disable writing to the non-volatile configuration
+ * registers by calling this function.
+ *
+ * Returns %0 on success and negative errno in case of failure.
+ */
+static int smb347_set_writable(struct smb347_charger *smb, bool writable)
+{
+	int ret;
+
+	ret = smb347_read(smb, CMD_A);
+	if (ret < 0)
+		return ret;
+
+	if (writable)
+		ret |= CMD_A_ALLOW_WRITE;
+	else
+		ret &= ~CMD_A_ALLOW_WRITE;
+
+	return smb347_write(smb, CMD_A, ret);
+}
+
+static int smb347_hw_init(struct smb347_charger *smb)
+{
+	int ret;
+
+	ret = smb347_set_writable(smb, true);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Program the platform specific configuration values to the device
+	 * first.
+	 */
+	ret = smb347_set_charge_current(smb);
+	if (ret < 0)
+		goto fail;
+
+	ret = smb347_set_current_limits(smb);
+	if (ret < 0)
+		goto fail;
+
+	ret = smb347_set_voltage_limits(smb);
+	if (ret < 0)
+		goto fail;
+
+	ret = smb347_set_temp_limits(smb);
+	if (ret < 0)
+		goto fail;
+
+	/* If USB charging is disabled we put the USB in suspend mode */
+	if (!smb->pdata->use_usb) {
+		ret = smb347_read(smb, CMD_A);
+		if (ret < 0)
+			goto fail;
+
+		ret |= CMD_A_SUSPEND_ENABLED;
+
+		ret = smb347_write(smb, CMD_A, ret);
+		if (ret < 0)
+			goto fail;
+	}
+
+	ret = smb347_read(smb, CFG_OTHER);
+	if (ret < 0)
+		goto fail;
+
+	/*
+	 * If configured by platform data, we enable hardware Auto-OTG
+	 * support for driving VBUS. Otherwise we disable it.
+	 */
+	ret &= ~CFG_OTHER_RID_MASK;
+	if (smb->pdata->use_usb_otg)
+		ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
+
+	ret = smb347_write(smb, CFG_OTHER, ret);
+	if (ret < 0)
+		goto fail;
+
+	ret = smb347_read(smb, CFG_PIN);
+	if (ret < 0)
+		goto fail;
+
+	/*
+	 * Make the charging functionality controllable by a write to the
+	 * command register unless pin control is specified in the platform
+	 * data.
+	 */
+	ret &= ~CFG_PIN_EN_CTRL_MASK;
+
+	switch (smb->pdata->enable_control) {
+	case SMB347_CHG_ENABLE_SW:
+		/* Do nothing, 0 means i2c control */
+		break;
+	case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
+		ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+		break;
+	case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
+		ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+		break;
+	}
+
+	/* Disable Automatic Power Source Detection (APSD) interrupt. */
+	ret &= ~CFG_PIN_EN_APSD_IRQ;
+
+	ret = smb347_write(smb, CFG_PIN, ret);
+	if (ret < 0)
+		goto fail;
+
+	ret = smb347_update_status(smb);
+	if (ret < 0)
+		goto fail;
+
+	ret = smb347_update_online(smb);
+
+fail:
+	smb347_set_writable(smb, false);
+	return ret;
+}
+
+static irqreturn_t smb347_interrupt(int irq, void *data)
+{
+	struct smb347_charger *smb = data;
+	int stat_c, irqstat_e, irqstat_c;
+	irqreturn_t ret = IRQ_NONE;
+
+	stat_c = smb347_read(smb, STAT_C);
+	if (stat_c < 0) {
+		dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+		return IRQ_NONE;
+	}
+
+	irqstat_c = smb347_read(smb, IRQSTAT_C);
+	if (irqstat_c < 0) {
+		dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+		return IRQ_NONE;
+	}
+
+	irqstat_e = smb347_read(smb, IRQSTAT_E);
+	if (irqstat_e < 0) {
+		dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+		return IRQ_NONE;
+	}
+
+	/*
+	 * If we get a charger error we report the error back to user space
+	 * and disable charging.
+	 */
+	if (stat_c & STAT_C_CHARGER_ERROR) {
+		dev_err(&smb->client->dev,
+			"error in charger, disabling charging\n");
+
+		smb347_charging_disable(smb);
+		power_supply_changed(&smb->battery);
+
+		ret = IRQ_HANDLED;
+	}
+
+	/*
+	 * If we reached the termination current the battery is charged and
+	 * we can update the status now. Charging is automatically
+	 * disabled by the hardware.
+	 */
+	if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
+		if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
+			power_supply_changed(&smb->battery);
+		ret = IRQ_HANDLED;
+	}
+
+	/*
+	 * If we got an under voltage interrupt it means that AC/USB input
+	 * was connected or disconnected.
+	 */
+	if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
+		if (smb347_update_status(smb) > 0) {
+			smb347_update_online(smb);
+			power_supply_changed(&smb->mains);
+			power_supply_changed(&smb->usb);
+		}
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int smb347_irq_set(struct smb347_charger *smb, bool enable)
+{
+	int ret;
+
+	ret = smb347_set_writable(smb, true);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Enable/disable interrupts for:
+	 *	- under voltage
+	 *	- termination current reached
+	 *	- charger error
+	 */
+	if (enable) {
+		ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
+		if (ret < 0)
+			goto fail;
+
+		ret = smb347_write(smb, CFG_STATUS_IRQ,
+				   CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
+		if (ret < 0)
+			goto fail;
+
+		ret = smb347_read(smb, CFG_PIN);
+		if (ret < 0)
+			goto fail;
+
+		ret |= CFG_PIN_EN_CHARGER_ERROR;
+
+		ret = smb347_write(smb, CFG_PIN, ret);
+	} else {
+		ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
+		if (ret < 0)
+			goto fail;
+
+		ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
+		if (ret < 0)
+			goto fail;
+
+		ret = smb347_read(smb, CFG_PIN);
+		if (ret < 0)
+			goto fail;
+
+		ret &= ~CFG_PIN_EN_CHARGER_ERROR;
+
+		ret = smb347_write(smb, CFG_PIN, ret);
+	}
+
+fail:
+	smb347_set_writable(smb, false);
+	return ret;
+}
+
+static inline int smb347_irq_enable(struct smb347_charger *smb)
+{
+	return smb347_irq_set(smb, true);
+}
+
+static inline int smb347_irq_disable(struct smb347_charger *smb)
+{
+	return smb347_irq_set(smb, false);
+}
+
+static int smb347_irq_init(struct smb347_charger *smb)
+{
+	const struct smb347_charger_platform_data *pdata = smb->pdata;
+	int ret, irq = gpio_to_irq(pdata->irq_gpio);
+
+	ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+	if (ret < 0)
+		goto fail;
+
+	ret = request_threaded_irq(irq, NULL, smb347_interrupt,
+				   IRQF_TRIGGER_FALLING, smb->client->name,
+				   smb);
+	if (ret < 0)
+		goto fail_gpio;
+
+	ret = smb347_set_writable(smb, true);
+	if (ret < 0)
+		goto fail_irq;
+
+	/*
+	 * Configure the STAT output to be suitable for interrupts: disable
+	 * all other output (except interrupts) and make it active low.
+	 */
+	ret = smb347_read(smb, CFG_STAT);
+	if (ret < 0)
+		goto fail_readonly;
+
+	ret &= ~CFG_STAT_ACTIVE_HIGH;
+	ret |= CFG_STAT_DISABLED;
+
+	ret = smb347_write(smb, CFG_STAT, ret);
+	if (ret < 0)
+		goto fail_readonly;
+
+	ret = smb347_irq_enable(smb);
+	if (ret < 0)
+		goto fail_readonly;
+
+	smb347_set_writable(smb, false);
+	smb->client->irq = irq;
+	return 0;
+
+fail_readonly:
+	smb347_set_writable(smb, false);
+fail_irq:
+	free_irq(irq, smb);
+fail_gpio:
+	gpio_free(pdata->irq_gpio);
+fail:
+	smb->client->irq = 0;
+	return ret;
+}
+
+static int smb347_mains_get_property(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     union power_supply_propval *val)
+{
+	struct smb347_charger *smb =
+		container_of(psy, struct smb347_charger, mains);
+
+	if (prop == POWER_SUPPLY_PROP_ONLINE) {
+		val->intval = smb->mains_online;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static enum power_supply_property smb347_mains_properties[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int smb347_usb_get_property(struct power_supply *psy,
+				   enum power_supply_property prop,
+				   union power_supply_propval *val)
+{
+	struct smb347_charger *smb =
+		container_of(psy, struct smb347_charger, usb);
+
+	if (prop == POWER_SUPPLY_PROP_ONLINE) {
+		val->intval = smb->usb_online;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static enum power_supply_property smb347_usb_properties[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int smb347_battery_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb347_charger *smb =
+			container_of(psy, struct smb347_charger, battery);
+	const struct smb347_charger_platform_data *pdata = smb->pdata;
+	int ret;
+
+	ret = smb347_update_status(smb);
+	if (ret < 0)
+		return ret;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!smb347_is_online(smb)) {
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+			break;
+		}
+		if (smb347_charging_status(smb))
+			val->intval = POWER_SUPPLY_STATUS_CHARGING;
+		else
+			val->intval = POWER_SUPPLY_STATUS_FULL;
+		break;
+
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		if (!smb347_is_online(smb))
+			return -ENODATA;
+
+		/*
+		 * We handle trickle and pre-charging the same, and taper
+		 * and none the same.
+		 */
+		switch (smb347_charging_status(smb)) {
+		case 1:
+			val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+			break;
+		case 2:
+			val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+			break;
+		default:
+			val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+			break;
+		}
+		break;
+
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = pdata->battery_info.technology;
+		break;
+
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+		val->intval = pdata->battery_info.voltage_min_design;
+		break;
+
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = pdata->battery_info.voltage_max_design;
+		break;
+
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		if (!smb347_is_online(smb))
+			return -ENODATA;
+		ret = smb347_read(smb, STAT_A);
+		if (ret < 0)
+			return ret;
+
+		ret &= STAT_A_FLOAT_VOLTAGE_MASK;
+		if (ret > 0x3d)
+			ret = 0x3d;
+
+		val->intval = 3500000 + ret * 20000;
+		break;
+
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		if (!smb347_is_online(smb))
+			return -ENODATA;
+
+		ret = smb347_read(smb, STAT_B);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * The current value is a composition of the FCC and PCC values,
+		 * and we can detect which table to use from bit 5.
+		 */
+		if (ret & 0x20) {
+			val->intval = hw_to_current(fcc_tbl,
+						    ARRAY_SIZE(fcc_tbl),
+						    ret & 7);
+		} else {
+			ret >>= 3;
+			val->intval = hw_to_current(pcc_tbl,
+						    ARRAY_SIZE(pcc_tbl),
+						    ret & 7);
+		}
+		break;
+
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		val->intval = pdata->battery_info.charge_full_design;
+		break;
+
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = pdata->battery_info.name;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum power_supply_property smb347_battery_properties[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int smb347_debugfs_show(struct seq_file *s, void *data)
+{
+	struct smb347_charger *smb = s->private;
+	int ret;
+	u8 reg;
+
+	seq_printf(s, "Control registers:\n");
+	seq_printf(s, "==================\n");
+	for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
+		ret = smb347_read(smb, reg);
+		seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+	}
+	seq_printf(s, "\n");
+
+	seq_printf(s, "Command registers:\n");
+	seq_printf(s, "==================\n");
+	ret = smb347_read(smb, CMD_A);
+	seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
+	ret = smb347_read(smb, CMD_B);
+	seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
+	ret = smb347_read(smb, CMD_C);
+	seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
+	seq_printf(s, "\n");
+
+	seq_printf(s, "Interrupt status registers:\n");
+	seq_printf(s, "===========================\n");
+	for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
+		ret = smb347_read(smb, reg);
+		seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+	}
+	seq_printf(s, "\n");
+
+	seq_printf(s, "Status registers:\n");
+	seq_printf(s, "=================\n");
+	for (reg = STAT_A; reg <= STAT_E; reg++) {
+		ret = smb347_read(smb, reg);
+		seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+	}
+
+	return 0;
+}
+
+static int smb347_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smb347_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations smb347_debugfs_fops = {
+	.open		= smb347_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int smb347_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	static char *battery[] = { "smb347-battery" };
+	const struct smb347_charger_platform_data *pdata;
+	struct device *dev = &client->dev;
+	struct smb347_charger *smb;
+	int ret;
+
+	pdata = dev->platform_data;
+	if (!pdata)
+		return -EINVAL;
+
+	if (!pdata->use_mains && !pdata->use_usb)
+		return -EINVAL;
+
+	smb = devm_kzalloc(dev, sizeof(*smb), GFP_KERNEL);
+	if (!smb)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, smb);
+
+	mutex_init(&smb->lock);
+	smb->client = client;
+	smb->pdata = pdata;
+
+	ret = smb347_hw_init(smb);
+	if (ret < 0)
+		return ret;
+
+	smb->mains.name = "smb347-mains";
+	smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+	smb->mains.get_property = smb347_mains_get_property;
+	smb->mains.properties = smb347_mains_properties;
+	smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+	smb->mains.supplied_to = battery;
+	smb->mains.num_supplicants = ARRAY_SIZE(battery);
+
+	smb->usb.name = "smb347-usb";
+	smb->usb.type = POWER_SUPPLY_TYPE_USB;
+	smb->usb.get_property = smb347_usb_get_property;
+	smb->usb.properties = smb347_usb_properties;
+	smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+	smb->usb.supplied_to = battery;
+	smb->usb.num_supplicants = ARRAY_SIZE(battery);
+
+	smb->battery.name = "smb347-battery";
+	smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+	smb->battery.get_property = smb347_battery_get_property;
+	smb->battery.properties = smb347_battery_properties;
+	smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
+
+	ret = power_supply_register(dev, &smb->mains);
+	if (ret < 0)
+		return ret;
+
+	ret = power_supply_register(dev, &smb->usb);
+	if (ret < 0) {
+		power_supply_unregister(&smb->mains);
+		return ret;
+	}
+
+	ret = power_supply_register(dev, &smb->battery);
+	if (ret < 0) {
+		power_supply_unregister(&smb->usb);
+		power_supply_unregister(&smb->mains);
+		return ret;
+	}
+
+	/*
+	 * Interrupt pin is optional. If it is connected, we setup the
+	 * interrupt support here.
+	 */
+	if (pdata->irq_gpio >= 0) {
+		ret = smb347_irq_init(smb);
+		if (ret < 0) {
+			dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
+			dev_warn(dev, "disabling IRQ support\n");
+		}
+	}
+
+	smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
+					  &smb347_debugfs_fops);
+	return 0;
+}
+
+static int smb347_remove(struct i2c_client *client)
+{
+	struct smb347_charger *smb = i2c_get_clientdata(client);
+
+	if (!IS_ERR_OR_NULL(smb->dentry))
+		debugfs_remove(smb->dentry);
+
+	if (client->irq) {
+		smb347_irq_disable(smb);
+		free_irq(client->irq, smb);
+		gpio_free(smb->pdata->irq_gpio);
+	}
+
+	power_supply_unregister(&smb->battery);
+	power_supply_unregister(&smb->usb);
+	power_supply_unregister(&smb->mains);
+	return 0;
+}
+
+static const struct i2c_device_id smb347_id[] = {
+	{ "smb347", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, smb347_id);
+
+static struct i2c_driver smb347_driver = {
+	.driver = {
+		.name = "smb347",
+	},
+	.probe        = smb347_probe,
+	.remove       = __devexit_p(smb347_remove),
+	.id_table     = smb347_id,
+};
+
+static int __init smb347_init(void)
+{
+	return i2c_add_driver(&smb347_driver);
+}
+module_init(smb347_init);
+
+static void __exit smb347_exit(void)
+{
+	i2c_del_driver(&smb347_driver);
+}
+module_exit(smb347_exit);
+
+MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("SMB347 battery charger driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:smb347");
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 636ebb2..8c9a607 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -316,19 +316,7 @@
 	.remove		= __devexit_p(z2_batt_remove),
 	.id_table	= z2_batt_id,
 };
-
-static int __init z2_batt_init(void)
-{
-	return i2c_add_driver(&z2_batt_driver);
-}
-
-static void __exit z2_batt_exit(void)
-{
-	i2c_del_driver(&z2_batt_driver);
-}
-
-module_init(z2_batt_init);
-module_exit(z2_batt_exit);
+module_i2c_driver(z2_batt_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>");
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 17499a5..81fd606 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -138,9 +138,10 @@
 	rdesc->type = REGULATOR_VOLTAGE;
 	rdesc->owner = THIS_MODULE;
 	sreg->mfd = anatopmfd;
-	ret = of_property_read_u32(np, "reg", &sreg->control_reg);
+	ret = of_property_read_u32(np, "anatop-reg-offset",
+				   &sreg->control_reg);
 	if (ret) {
-		dev_err(dev, "no reg property set\n");
+		dev_err(dev, "no anatop-reg-offset property set\n");
 		goto anatop_probe_end;
 	}
 	ret = of_property_read_u32(np, "anatop-vol-bit-width",
@@ -213,7 +214,7 @@
 	{ /* end */ }
 };
 
-static struct platform_driver anatop_regulator = {
+static struct platform_driver anatop_regulator_driver = {
 	.driver = {
 		.name	= "anatop_regulator",
 		.owner  = THIS_MODULE,
@@ -225,13 +226,13 @@
 
 static int __init anatop_regulator_init(void)
 {
-	return platform_driver_register(&anatop_regulator);
+	return platform_driver_register(&anatop_regulator_driver);
 }
 postcore_initcall(anatop_regulator_init);
 
 static void __exit anatop_regulator_exit(void)
 {
-	platform_driver_unregister(&anatop_regulator);
+	platform_driver_unregister(&anatop_regulator_driver);
 }
 module_exit(anatop_regulator_exit);
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c056abd..e70dd38 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2992,14 +2992,14 @@
 	if (rdev == NULL)
 		return;
 
+	if (rdev->supply)
+		regulator_put(rdev->supply);
 	mutex_lock(&regulator_list_mutex);
 	debugfs_remove_recursive(rdev->debugfs);
 	flush_work_sync(&rdev->disable_work.work);
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
-	if (rdev->supply)
-		regulator_put(rdev->supply);
 	kfree(rdev->constraints);
 	device_unregister(&rdev->dev);
 	mutex_unlock(&regulator_list_mutex);
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index 30d0a15..cacd33c 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -18,7 +18,6 @@
 
 /**
  * regulator_register_fixed - register a no-op fixed regulator
- * @name: supply name
  * @id: platform device id
  * @supplies: consumers for this regulator
  * @num_supplies: number of consumers
@@ -32,7 +31,7 @@
 	if (!data)
 		return NULL;
 
-	data->cfg.supply_name = "dummy";
+	data->cfg.supply_name = "fixed-dummy";
 	data->cfg.microvolts = 0;
 	data->cfg.gpio = -EINVAL;
 	data->cfg.enabled_at_boot = 1;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index e8cfc99..845aa22 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -552,7 +552,7 @@
 	mc13xxx_lock(mc13892);
 	ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
 	if (ret)
-		goto err_free;
+		goto err_unlock;
 
 	/* enable switch auto mode */
 	if ((val & 0x0000FFFF) == 0x45d0) {
@@ -562,7 +562,7 @@
 			MC13892_SWITCHERS4_SW1MODE_AUTO |
 			MC13892_SWITCHERS4_SW2MODE_AUTO);
 		if (ret)
-			goto err_free;
+			goto err_unlock;
 
 		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
 			MC13892_SWITCHERS5_SW3MODE_M |
@@ -570,7 +570,7 @@
 			MC13892_SWITCHERS5_SW3MODE_AUTO |
 			MC13892_SWITCHERS5_SW4MODE_AUTO);
 		if (ret)
-			goto err_free;
+			goto err_unlock;
 	}
 	mc13xxx_unlock(mc13892);
 
@@ -612,10 +612,10 @@
 err:
 	while (--i >= 0)
 		regulator_unregister(priv->regulators[i]);
+	return ret;
 
-err_free:
+err_unlock:
 	mc13xxx_unlock(mc13892);
-
 	return ret;
 }
 
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 58447db..4ca2db0 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -311,8 +311,7 @@
 	struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
 	const struct s5m_voltage_desc *desc;
 	int reg_id = rdev_get_id(rdev);
-	int reg, mask, ret;
-	int i;
+	int sel, reg, mask, ret;
 	u8 val;
 
 	switch (reg_id) {
@@ -333,19 +332,20 @@
 
 	desc = reg_voltage_map[reg_id];
 
-	i = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
-	if (i < 0)
-		return i;
+	sel = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
+	if (sel < 0)
+		return sel;
 
 	ret = s5m8767_get_voltage_register(rdev, &reg);
 	if (ret)
 		return ret;
 
 	s5m_reg_read(s5m8767->iodev, reg, &val);
-	val = val & mask;
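+	/* clear the old selector bits before programming the new selection */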
+	val &= ~mask;
+	val |= sel;
 
 	ret = s5m_reg_write(s5m8767->iodev, reg, val);
-	*selector = i;
+	*selector = sel;
 
 	return ret;
 }
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 29b615c..cfc1f16 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -79,6 +79,11 @@
 				     unsigned selector)
 {
 	struct tps6586x_regulator *info = rdev_get_drvdata(rdev);
+	int rid = rdev_get_id(rdev);
+
+	/* LDO0 has minimal voltage 1.2V rather than 1.25V */
+	if ((rid == TPS6586X_ID_LDO_0) && (selector == 0))
+		return (info->voltages[0] - 50) * 1000;
 
 	return info->voltages[selector] * 1000;
 }
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 4904a40..ff810e7 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -380,13 +380,15 @@
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(wm831x_dcdc_ilim); i++) {
-		if (max_uA <= wm831x_dcdc_ilim[i])
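+		/* pick the first limit that falls within the requested range */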
+		if ((min_uA <= wm831x_dcdc_ilim[i]) &&
+		    (wm831x_dcdc_ilim[i] <= max_uA))
 			break;
 	}
 	if (i == ARRAY_SIZE(wm831x_dcdc_ilim))
 		return -EINVAL;
 
-	return wm831x_set_bits(wm831x, reg, WM831X_DC1_HC_THR_MASK, i);
+	return wm831x_set_bits(wm831x, reg, WM831X_DC1_HC_THR_MASK,
+			       i << WM831X_DC1_HC_THR_SHIFT);
 }
 
 static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
@@ -400,7 +402,8 @@
 	if (val < 0)
 		return val;
 
-	return wm831x_dcdc_ilim[val & WM831X_DC1_HC_THR_MASK];
+	val = (val & WM831X_DC1_HC_THR_MASK) >> WM831X_DC1_HC_THR_SHIFT;
+	return wm831x_dcdc_ilim[val];
 }
 
 static struct regulator_ops wm831x_buckv_ops = {
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 634aac3..b414e09 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -101,7 +101,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(wm831x_isinkv_values); i++) {
 		int val = wm831x_isinkv_values[i];
-		if (min_uA >= val && val <= max_uA) {
+		if (min_uA <= val && val <= max_uA) {
 			ret = wm831x_set_bits(wm831x, isink->reg,
 					      WM831X_CS1_ISEL_MASK, i);
 			return ret;
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index f1e4ab0..641e9f6 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -506,22 +506,19 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
-	int ctrl_reg = ldo->base + WM831X_LDO_CONTROL;
 	int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
 	int ret;
 
 
 	switch (mode) {
 	case REGULATOR_MODE_NORMAL:
-		ret = wm831x_set_bits(wm831x, on_reg,
-				      WM831X_LDO7_ON_MODE, 0);
+		ret = wm831x_set_bits(wm831x, on_reg, WM831X_LDO7_ON_MODE, 0);
 		if (ret < 0)
 			return ret;
 		break;
 
 	case REGULATOR_MODE_IDLE:
-		ret = wm831x_set_bits(wm831x, ctrl_reg,
-				      WM831X_LDO7_ON_MODE,
+		ret = wm831x_set_bits(wm831x, on_reg, WM831X_LDO7_ON_MODE,
 				      WM831X_LDO7_ON_MODE);
 		if (ret < 0)
 			return ret;
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index ab1e183a7..05ecfb8 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -99,7 +99,7 @@
 {
 	int i;
 
-	for (i = ARRAY_SIZE(isink_cur) - 1; i >= 0; i--) {
+	for (i = 0; i < ARRAY_SIZE(isink_cur); i++) {
 		if (min_uA <= isink_cur[i] && max_uA >= isink_cur[i]) {
 			*setting = i;
 			return 0;
@@ -186,7 +186,7 @@
 		return 0;
 	}
 
-	return DIV_ROUND_CLOSEST(isink_cur[val], 100);
+	return isink_cur[val];
 }
 
 /* turn on ISINK followed by DCDC */
@@ -495,25 +495,25 @@
 		val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER)
 			& ~WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC1_LOW_POWER,
-			wm8350->pmic.dcdc1_hib_mode);
+			val | wm8350->pmic.dcdc1_hib_mode);
 		break;
 	case WM8350_DCDC_3:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC3_LOW_POWER)
 			& ~WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC3_LOW_POWER,
-			wm8350->pmic.dcdc3_hib_mode);
+			val | wm8350->pmic.dcdc3_hib_mode);
 		break;
 	case WM8350_DCDC_4:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC4_LOW_POWER)
 			& ~WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC4_LOW_POWER,
-			wm8350->pmic.dcdc4_hib_mode);
+			val | wm8350->pmic.dcdc4_hib_mode);
 		break;
 	case WM8350_DCDC_6:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC6_LOW_POWER)
 			& ~WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC6_LOW_POWER,
-			wm8350->pmic.dcdc6_hib_mode);
+			val | wm8350->pmic.dcdc6_hib_mode);
 		break;
 	case WM8350_DCDC_2:
 	case WM8350_DCDC_5:
@@ -535,25 +535,25 @@
 		val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER);
 		wm8350->pmic.dcdc1_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC1_LOW_POWER,
-			WM8350_DCDC_HIB_MODE_DIS);
+				 val | WM8350_DCDC_HIB_MODE_DIS);
 		break;
 	case WM8350_DCDC_3:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC3_LOW_POWER);
 		wm8350->pmic.dcdc3_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC3_LOW_POWER,
-			WM8350_DCDC_HIB_MODE_DIS);
+				 val | WM8350_DCDC_HIB_MODE_DIS);
 		break;
 	case WM8350_DCDC_4:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC4_LOW_POWER);
 		wm8350->pmic.dcdc4_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC4_LOW_POWER,
-			WM8350_DCDC_HIB_MODE_DIS);
+				 val | WM8350_DCDC_HIB_MODE_DIS);
 		break;
 	case WM8350_DCDC_6:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC6_LOW_POWER);
 		wm8350->pmic.dcdc6_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC6_LOW_POWER,
-			WM8350_DCDC_HIB_MODE_DIS);
+				 val | WM8350_DCDC_HIB_MODE_DIS);
 		break;
 	case WM8350_DCDC_2:
 	case WM8350_DCDC_5:
@@ -575,13 +575,13 @@
 		val = wm8350_reg_read(wm8350, WM8350_DCDC2_CONTROL)
 		    & ~WM8350_DC2_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC2_CONTROL, val |
-				 WM8350_DC2_HIB_MODE_ACTIVE);
+		    (WM8350_DC2_HIB_MODE_ACTIVE << WM8350_DC2_HIB_MODE_SHIFT));
 		break;
 	case WM8350_DCDC_5:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC5_CONTROL)
-		    & ~WM8350_DC2_HIB_MODE_MASK;
+		    & ~WM8350_DC5_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC5_CONTROL, val |
-				 WM8350_DC5_HIB_MODE_ACTIVE);
+		    (WM8350_DC5_HIB_MODE_ACTIVE << WM8350_DC5_HIB_MODE_SHIFT));
 		break;
 	default:
 		return -EINVAL;
@@ -600,13 +600,13 @@
 		val = wm8350_reg_read(wm8350, WM8350_DCDC2_CONTROL)
 		    & ~WM8350_DC2_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC2_CONTROL, val |
-				 WM8350_DC2_HIB_MODE_DISABLE);
+		    (WM8350_DC2_HIB_MODE_DISABLE << WM8350_DC2_HIB_MODE_SHIFT));
 		break;
 	case WM8350_DCDC_5:
 		val = wm8350_reg_read(wm8350, WM8350_DCDC5_CONTROL)
-		    & ~WM8350_DC2_HIB_MODE_MASK;
+		    & ~WM8350_DC5_HIB_MODE_MASK;
 		wm8350_reg_write(wm8350, WM8350_DCDC5_CONTROL, val |
-				 WM8350_DC2_HIB_MODE_DISABLE);
+		    (WM8350_DC5_HIB_MODE_DISABLE << WM8350_DC5_HIB_MODE_SHIFT));
 		break;
 	default:
 		return -EINVAL;
@@ -749,7 +749,7 @@
 
 	/* all LDOs have same mV bits */
 	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_HIB_MODE_MASK;
-	wm8350_reg_write(wm8350, volt_reg, WM8350_LDO1_HIB_MODE_DIS);
+	wm8350_reg_write(wm8350, volt_reg, val | WM8350_LDO1_HIB_MODE_DIS);
 	return 0;
 }
 
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 70277a5..85d31a6 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -50,16 +50,9 @@
 	return simple_read_from_buffer(userbuf, count, ppos, trace->va, len);
 }
 
-static int rproc_open_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static const struct file_operations trace_rproc_ops = {
 	.read = rproc_trace_read,
-	.open = rproc_open_generic,
+	.open = simple_open,
 	.llseek	= generic_file_llseek,
 };
 
@@ -94,7 +87,7 @@
 
 static const struct file_operations rproc_state_ops = {
 	.read = rproc_state_read,
-	.open = rproc_open_generic,
+	.open = simple_open,
 	.llseek	= generic_file_llseek,
 };
 
@@ -114,7 +107,7 @@
 
 static const struct file_operations rproc_name_ops = {
 	.read = rproc_name_read,
-	.open = rproc_open_generic,
+	.open = simple_open,
 	.llseek	= generic_file_llseek,
 };
 
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index afee0e8..feddefc 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -72,9 +72,9 @@
 	struct pm860x_rtc_info *info = dev_get_drvdata(dev);
 
 	if (enabled)
-		pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, ALARM);
+		pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, ALARM_EN);
 	else
-		pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, 0);
+		pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0);
 	return 0;
 }
 
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 5502923..c9f890b 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -213,7 +213,6 @@
 		.name = "rtc-efi",
 		.owner = THIS_MODULE,
 	},
-	.probe = efi_rtc_probe,
 	.remove = __exit_p(efi_rtc_remove),
 };
 
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 692de73..684ef4b 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -339,8 +339,7 @@
 	dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
 
 	/* Enable the clockwatch on ST Variants */
-	if ((ldata->hw_designer == AMBA_VENDOR_ST) &&
-	    (ldata->hw_revision > 1))
+	if (ldata->hw_designer == AMBA_VENDOR_ST)
 		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
 		       ldata->base + RTC_CR);
 
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 7f8e6c2..33b6ba0 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -122,6 +122,7 @@
 static int __devinit r9701_probe(struct spi_device *spi)
 {
 	struct rtc_device *rtc;
+	struct rtc_time dt;
 	unsigned char tmp;
 	int res;
 
@@ -132,6 +133,27 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * The device seems to be present. Now check if the registers
+	 * contain invalid values. If so, try to write a default date:
+	 * 2000/1/1 00:00:00
+	 */
+	r9701_get_datetime(&spi->dev, &dt);
+	if (rtc_valid_tm(&dt)) {
+		dev_info(&spi->dev, "trying to repair invalid date/time\n");
+		dt.tm_sec  = 0;
+		dt.tm_min  = 0;
+		dt.tm_hour = 0;
+		dt.tm_mday = 1;
+		dt.tm_mon  = 0;
+		dt.tm_year = 100;
+
+		if (r9701_set_datetime(&spi->dev, &dt)) {
+			dev_err(&spi->dev, "cannot repair RTC register\n");
+			return -ENODEV;
+		}
+	}
+
 	rtc = rtc_device_register("r9701",
 				&spi->dev, &r9701_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc))
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9ccea13..3f3a297 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -40,6 +40,10 @@
 	TYPE_S3C64XX,
 };
 
+struct s3c_rtc_drv_data {
+	int cpu_type;
+};
+
 /* I have yet to find an S3C implementation with more than one
  * of these rtc blocks in */
 
@@ -446,10 +450,12 @@
 static inline int s3c_rtc_get_driver_data(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
+	struct s3c_rtc_drv_data *data;
 	if (pdev->dev.of_node) {
 		const struct of_device_id *match;
 		match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node);
-		return match->data;
+		data = (struct s3c_rtc_drv_data *) match->data;
+		return data->cpu_type;
 	}
 #endif
 	return platform_get_device_id(pdev)->driver_data;
@@ -664,20 +670,27 @@
 #define s3c_rtc_resume  NULL
 #endif
 
+static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
+	[TYPE_S3C2410] = { TYPE_S3C2410 },
+	[TYPE_S3C2416] = { TYPE_S3C2416 },
+	[TYPE_S3C2443] = { TYPE_S3C2443 },
+	[TYPE_S3C64XX] = { TYPE_S3C64XX },
+};
+
 #ifdef CONFIG_OF
 static const struct of_device_id s3c_rtc_dt_match[] = {
 	{
-		.compatible = "samsung,s3c2410-rtc"
-		.data = TYPE_S3C2410,
+		.compatible = "samsung,s3c2410-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2410],
 	}, {
-		.compatible = "samsung,s3c2416-rtc"
-		.data = TYPE_S3C2416,
+		.compatible = "samsung,s3c2416-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2416],
 	}, {
-		.compatible = "samsung,s3c2443-rtc"
-		.data = TYPE_S3C2443,
+		.compatible = "samsung,s3c2443-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2443],
 	}, {
-		.compatible = "samsung,s3c6410-rtc"
-		.data = TYPE_S3C64XX,
+		.compatible = "samsung,s3c6410-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C64XX],
 	},
 	{},
 };
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 4c2c6df..258abea 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -112,6 +112,7 @@
 #define BIT_RTC_CTRL_REG_TEST_MODE_M             0x10
 #define BIT_RTC_CTRL_REG_SET_32_COUNTER_M        0x20
 #define BIT_RTC_CTRL_REG_GET_TIME_M              0x40
+#define BIT_RTC_CTRL_REG_RTC_V_OPT               0x80
 
 /* RTC_STATUS_REG bitfields */
 #define BIT_RTC_STATUS_REG_RUN_M                 0x02
@@ -235,25 +236,57 @@
 	unsigned char rtc_data[ALL_TIME_REGS + 1];
 	int ret;
 	u8 save_control;
+	u8 rtc_control;
 
 	ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
-	if (ret < 0)
+	if (ret < 0) {
+		dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret);
 		return ret;
+	}
+	/* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */
+	if (twl_class_is_6030()) {
+		if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) {
+			save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M;
+			ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+			if (ret < 0) {
+				dev_err(dev, "%s clr GET_TIME, error %d\n",
+					__func__, ret);
+				return ret;
+			}
+		}
+	}
 
-	save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
+	/* Copy RTC counting registers to static registers or latches */
+	rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M;
 
-	ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
-	if (ret < 0)
+	/* for twl6030/32 enable read access to static shadowed registers */
+	if (twl_class_is_6030())
+		rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT;
+
+	ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG);
+	if (ret < 0) {
+		dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret);
 		return ret;
+	}
 
 	ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
 			(rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
 
 	if (ret < 0) {
-		dev_err(dev, "rtc_read_time error %d\n", ret);
+		dev_err(dev, "%s: reading data, error %d\n", __func__, ret);
 		return ret;
 	}
 
+	/* for twl6030 restore original state of rtc control register */
+	if (twl_class_is_6030()) {
+		ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+		if (ret < 0) {
+			dev_err(dev, "%s: restore CTRL_REG, error %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
 	tm->tm_sec = bcd2bin(rtc_data[0]);
 	tm->tm_min = bcd2bin(rtc_data[1]);
 	tm->tm_hour = bcd2bin(rtc_data[2]);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a06e608..29684c8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -619,6 +619,7 @@
 
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/ufs/Kconfig"
 
 config SCSI_HPTIOP
 	tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ad24e06..8deedea 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -108,6 +108,7 @@
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/
 obj-$(CONFIG_SCSI_MPT2SAS)	+= mpt2sas/
+obj-$(CONFIG_SCSI_UFSHCD)	+= ufs/
 obj-$(CONFIG_SCSI_ACARD)	+= atp870u.o
 obj-$(CONFIG_SCSI_SUNESP)	+= esp_scsi.o	sun_esp.o
 obj-$(CONFIG_SCSI_GDTH)		+= gdth.o
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 7d48700..9328121 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -341,10 +341,10 @@
 "				(0/256ms,1/128ms,2/64ms,3/32ms)\n"
 "	slowcrc			Turn on the SLOWCRC bit (Rev B only)\n"		 
 "\n"
-"	Sample /etc/modprobe.conf line:\n"
-"		Enable verbose logging\n"
-"		Set tag depth on Controller 2/Target 2 to 10 tags\n"
-"		Shorten the selection timeout to 128ms\n"
+"	Sample modprobe configuration file:\n"
+"	#	Enable verbose logging\n"
+"	#	Set tag depth on Controller 2/Target 2 to 10 tags\n"
+"	#	Shorten the selection timeout to 128ms\n"
 "\n"
 "	options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
 );
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6251bb..5a477cd 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -360,10 +360,10 @@
 "	seltime:<int>		Selection Timeout\n"
 "				(0/256ms,1/128ms,2/64ms,3/32ms)\n"
 "\n"
-"	Sample /etc/modprobe.conf line:\n"
-"		Toggle EISA/VLB probing\n"
-"		Set tag depth on Controller 1/Target 1 to 10 tags\n"
-"		Shorten the selection timeout to 128ms\n"
+"	Sample modprobe configuration file:\n"
+"	#	Toggle EISA/VLB probing\n"
+"	#	Set tag depth on Controller 1/Target 1 to 10 tags\n"
+"	#	Shorten the selection timeout to 128ms\n"
 "\n"
 "	options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
 );
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index f29d512..68ce085 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -2582,7 +2582,7 @@
 	 * this than via the PCI device table
 	 */
 	if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
-		error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+		atpdev->chip_ver = pdev->revision;
 		if (atpdev->chip_ver < 2)
 			goto err_eio;
 	}
@@ -2601,7 +2601,7 @@
 	base_io &= 0xfffffff8;
 
 	if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
-		error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+		atpdev->chip_ver = pdev->revision;
 		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
 
 		host_id = inb(base_io + 0x39);
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index a796de9..4ad7e36 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -225,9 +225,9 @@
 };
 
 struct bfa_iocfc_s {
+	bfa_fsm_t		fsm;
 	struct bfa_s		*bfa;
 	struct bfa_iocfc_cfg_s	cfg;
-	int			action;
 	u32		req_cq_pi[BFI_IOC_MAX_CQS];
 	u32		rsp_cq_ci[BFI_IOC_MAX_CQS];
 	u8		hw_qid[BFI_IOC_MAX_CQS];
@@ -236,7 +236,9 @@
 	struct bfa_cb_qe_s	dis_hcb_qe;
 	struct bfa_cb_qe_s	en_hcb_qe;
 	struct bfa_cb_qe_s	stats_hcb_qe;
-	bfa_boolean_t		cfgdone;
+	bfa_boolean_t		submod_enabled;
+	bfa_boolean_t		cb_reqd;	/* Driver call back reqd */
+	bfa_status_t		op_status;	/* Status of bfa iocfc op */
 
 	struct bfa_dma_s	cfg_info;
 	struct bfi_iocfc_cfg_s *cfginfo;
@@ -341,8 +343,6 @@
 void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
 				 u32 *end);
 void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
-wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
-wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
 int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
 				struct bfi_pbc_vport_s *pbc_vport);
 
@@ -428,7 +428,6 @@
 
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
-void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)		\
 	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
 
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 4bd546b..456e576 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -200,13 +200,431 @@
 #define DEF_CFG_NUM_SBOOT_LUNS		16
 
 /*
+ * IOCFC state machine definitions/declarations
+ */
+bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, operational,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_write,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_failed,
+		   struct bfa_iocfc_s, enum iocfc_event);
+
+/*
  * forward declaration for IOC FC functions
  */
+static void bfa_iocfc_start_submod(struct bfa_s *bfa);
+static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
+static void bfa_iocfc_send_cfg(void *bfa_arg);
 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
 static void bfa_iocfc_disable_cbfn(void *bfa_arg);
 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
+
+static void
+bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
+{
+}
+
+static void
+bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_INIT:
+	case IOCFC_E_ENABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_dconf_modinit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_DCONF_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_CFG_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
+{
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+		     bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_START:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+		break;
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_fcport_init(iocfc->bfa);
+	bfa_iocfc_start_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_dconf_modexit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_DCONF_DONE:
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_DISABLED:
+		bfa_isr_disable(iocfc->bfa);
+		bfa_iocfc_disable_submod(iocfc->bfa);
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
+			     bfa_iocfc_stop_cb, iocfc->bfa);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_CFG_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_DISABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	bfa_iocfc_disable_submod(iocfc->bfa);
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+		     bfa_iocfc_disable_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_ENABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	bfa_iocfc_disable_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+		     bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_ioc_disable(&iocfc->bfa->ioc);
+		break;
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+		break;
+	case IOCFC_E_IOC_DISABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+			     bfa_iocfc_disable_cb, iocfc->bfa);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
 
 /*
  * BFA Interrupt handling functions
@@ -231,16 +649,19 @@
 	}
 }
 
-static inline void
+bfa_boolean_t
 bfa_isr_rspq(struct bfa_s *bfa, int qid)
 {
 	struct bfi_msg_s *m;
 	u32	pi, ci;
 	struct list_head *waitq;
+	bfa_boolean_t ret;
 
 	ci = bfa_rspq_ci(bfa, qid);
 	pi = bfa_rspq_pi(bfa, qid);
 
+	ret = (ci != pi);
+
 	while (ci != pi) {
 		m = bfa_rspq_elem(bfa, qid, ci);
 		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
@@ -260,6 +681,8 @@
 	waitq = bfa_reqq(bfa, qid);
 	if (!list_empty(waitq))
 		bfa_reqq_resume(bfa, qid);
+
+	return ret;
 }
 
 static inline void
@@ -320,6 +743,7 @@
 {
 	u32 intr, qintr;
 	int queue;
+	bfa_boolean_t rspq_comp = BFA_FALSE;
 
 	intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
@@ -332,11 +756,12 @@
 	 */
 	if (bfa->queue_process) {
 		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
-			bfa_isr_rspq(bfa, queue);
+			if (bfa_isr_rspq(bfa, queue))
+				rspq_comp = BFA_TRUE;
 	}
 
 	if (!intr)
-		return BFA_TRUE;
+		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
 
 	/*
 	 * CPE completion queue interrupt
@@ -525,11 +950,9 @@
 	 * Enable interrupt coalescing if it is driver init path
 	 * and not ioc disable/enable path.
 	 */
-	if (!iocfc->cfgdone)
+	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
 		cfg_info->intr_attr.coalesce = BFA_TRUE;
 
-	iocfc->cfgdone = BFA_FALSE;
-
 	/*
 	 * dma map IOC configuration itself
 	 */
@@ -549,8 +972,6 @@
 
 	bfa->bfad = bfad;
 	iocfc->bfa = bfa;
-	iocfc->action = BFA_IOCFC_ACT_NONE;
-
 	iocfc->cfg = *cfg;
 
 	/*
@@ -683,6 +1104,8 @@
 
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->start(bfa);
+
+	bfa->iocfc.submod_enabled = BFA_TRUE;
 }
 
 /*
@@ -693,8 +1116,13 @@
 {
 	int		i;
 
+	if (bfa->iocfc.submod_enabled == BFA_FALSE)
+		return;
+
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->iocdisable(bfa);
+
+	bfa->iocfc.submod_enabled = BFA_FALSE;
 }
 
 static void
@@ -702,15 +1130,8 @@
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	if (complete) {
-		if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
-			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
-		else
-			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
-	} else {
-		if (bfa->iocfc.cfgdone)
-			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
-	}
+	if (complete)
+		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
 }
 
 static void
@@ -721,8 +1142,6 @@
 
 	if (compl)
 		complete(&bfad->comp);
-	else
-		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
 }
 
 static void
@@ -794,8 +1213,6 @@
 	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
 	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);
 
-	iocfc->cfgdone = BFA_TRUE;
-
 	/*
 	 * configure queue register offsets as learnt from firmware
 	 */
@@ -811,22 +1228,13 @@
 	 */
 	bfa_msix_queue_install(bfa);
 
-	/*
-	 * Configuration is complete - initialize/start submodules
-	 */
-	bfa_fcport_init(bfa);
-
-	if (iocfc->action == BFA_IOCFC_ACT_INIT) {
-		if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
-			bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
-				bfa_iocfc_init_cb, bfa);
-	} else {
-		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
-			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
-					bfa_iocfc_enable_cb, bfa);
-		bfa_iocfc_start_submod(bfa);
+	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
+		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
+		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
+		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
 	}
 }
+
 void
 bfa_iocfc_reset_queues(struct bfa_s *bfa)
 {
@@ -840,6 +1248,23 @@
 	}
 }
 
+/*
+ *	Process FAA pwwn msg from fw.
+ */
+static void
+bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
+{
+	struct bfa_iocfc_s		*iocfc   = &bfa->iocfc;
+	struct bfi_iocfc_cfgrsp_s	*cfgrsp  = iocfc->cfgrsp;
+
+	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
+	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
+
+	bfa->ioc.attr->pwwn = msg->pwwn;
+	bfa->ioc.attr->nwwn = msg->nwwn;
+	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+}
+
 /* Fabric Assigned Address specific functions */
 
 /*
@@ -855,84 +1280,13 @@
 		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
 			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
 	} else {
-		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
-			return BFA_STATUS_IOC_NON_OP;
+		return BFA_STATUS_IOC_NON_OP;
 	}
 
 	return BFA_STATUS_OK;
 }
 
 bfa_status_t
-bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
-{
-	struct bfi_faa_en_dis_s faa_enable_req;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-	bfa_status_t		status;
-
-	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
-	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
-	status = bfa_faa_validate_request(bfa);
-	if (status != BFA_STATUS_OK)
-		return status;
-
-	if (iocfc->faa_args.busy == BFA_TRUE)
-		return BFA_STATUS_DEVBUSY;
-
-	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
-		return BFA_STATUS_FAA_ENABLED;
-
-	if (bfa_fcport_is_trunk_enabled(bfa))
-		return BFA_STATUS_ERROR_TRUNK_ENABLED;
-
-	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
-	iocfc->faa_args.busy = BFA_TRUE;
-
-	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
-	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
-		BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
-
-	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
-			sizeof(struct bfi_faa_en_dis_s));
-
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
-		void *cbarg)
-{
-	struct bfi_faa_en_dis_s faa_disable_req;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-	bfa_status_t		status;
-
-	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
-	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
-	status = bfa_faa_validate_request(bfa);
-	if (status != BFA_STATUS_OK)
-		return status;
-
-	if (iocfc->faa_args.busy == BFA_TRUE)
-		return BFA_STATUS_DEVBUSY;
-
-	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
-		return BFA_STATUS_FAA_DISABLED;
-
-	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
-	iocfc->faa_args.busy = BFA_TRUE;
-
-	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
-	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
-		BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
-
-	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
-		sizeof(struct bfi_faa_en_dis_s));
-
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
 bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
 		bfa_cb_iocfc_t cbfn, void *cbarg)
 {
@@ -963,38 +1317,6 @@
 }
 
 /*
- *	FAA enable response
- */
-static void
-bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
-		struct bfi_faa_en_dis_rsp_s *rsp)
-{
-	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
-	bfa_status_t	status = rsp->status;
-
-	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
-	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
-	iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
- *	FAA disable response
- */
-static void
-bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
-		struct bfi_faa_en_dis_rsp_s *rsp)
-{
-	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
-	bfa_status_t	status = rsp->status;
-
-	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
-	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
-	iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
  *	FAA query response
  */
 static void
@@ -1023,25 +1345,10 @@
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
-		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-				bfa_iocfc_init_cb, bfa);
-		return;
-	}
-
-	if (status != BFA_STATUS_OK) {
-		bfa_isr_disable(bfa);
-		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-				     bfa_iocfc_init_cb, bfa);
-		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
-			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
-					bfa_iocfc_enable_cb, bfa);
-		return;
-	}
-
-	bfa_iocfc_send_cfg(bfa);
-	bfa_dconf_modinit(bfa);
+	if (status == BFA_STATUS_OK)
+		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
+	else
+		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
 }
 
 /*
@@ -1052,17 +1359,7 @@
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	bfa_isr_disable(bfa);
-	bfa_iocfc_disable_submod(bfa);
-
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
-		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
-			     bfa);
-	else {
-		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
-		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
-			     bfa);
-	}
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
 }
 
 /*
@@ -1074,13 +1371,7 @@
 	struct bfa_s	*bfa = bfa_arg;
 
 	bfa->queue_process = BFA_FALSE;
-
-	bfa_isr_disable(bfa);
-	bfa_iocfc_disable_submod(bfa);
-
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
-			     bfa);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
 }
 
 /*
@@ -1095,7 +1386,6 @@
 	bfa_isr_enable(bfa);
 }
 
-
 /*
  * Query IOC memory requirement information.
  */
@@ -1171,6 +1461,12 @@
 	INIT_LIST_HEAD(&bfa->comp_q);
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
 		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+
+	bfa->iocfc.cb_reqd = BFA_FALSE;
+	bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa->iocfc.submod_enabled = BFA_FALSE;
+
+	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
 }
 
 /*
@@ -1179,8 +1475,7 @@
 void
 bfa_iocfc_init(struct bfa_s *bfa)
 {
-	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
-	bfa_ioc_enable(&bfa->ioc);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
 }
 
 /*
@@ -1190,8 +1485,7 @@
 void
 bfa_iocfc_start(struct bfa_s *bfa)
 {
-	if (bfa->iocfc.cfgdone)
-		bfa_iocfc_start_submod(bfa);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
 }
 
 /*
@@ -1201,12 +1495,8 @@
 void
 bfa_iocfc_stop(struct bfa_s *bfa)
 {
-	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
-
 	bfa->queue_process = BFA_FALSE;
-	bfa_dconf_modexit(bfa);
-	if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
-		bfa_ioc_disable(&bfa->ioc);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
 }
 
 void
@@ -1226,13 +1516,9 @@
 	case BFI_IOCFC_I2H_UPDATEQ_RSP:
 		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
 		break;
-	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
-		bfa_faa_enable_reply(iocfc,
-			(struct bfi_faa_en_dis_rsp_s *)msg);
-		break;
-	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
-		bfa_faa_disable_reply(iocfc,
-			(struct bfi_faa_en_dis_rsp_s *)msg);
+	case BFI_IOCFC_I2H_ADDR_MSG:
+		bfa_iocfc_process_faa_addr(bfa,
+				(struct bfi_faa_addr_msg_s *)msg);
 		break;
 	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
 		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
@@ -1306,8 +1592,8 @@
 {
 	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
 		     "IOC Enable");
-	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
-	bfa_ioc_enable(&bfa->ioc);
+	bfa->iocfc.cb_reqd = BFA_TRUE;
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
 }
 
 void
@@ -1315,17 +1601,16 @@
 {
 	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
 		     "IOC Disable");
-	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
 
 	bfa->queue_process = BFA_FALSE;
-	bfa_ioc_disable(&bfa->ioc);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
 }
 
-
 bfa_boolean_t
 bfa_iocfc_is_operational(struct bfa_s *bfa)
 {
-	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
+	return bfa_ioc_is_operational(&bfa->ioc) &&
+		bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
 }
 
 /*
@@ -1567,16 +1852,6 @@
 	}
 }
 
-void
-bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
-{
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
-		if (bfa->iocfc.cfgdone == BFA_TRUE)
-			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-				bfa_iocfc_init_cb, bfa);
-	}
-}
-
 /*
  * Return the list of PCI vendor/device id lists supported by this
  * BFA instance.
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index cb07c62..36756ce 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -52,7 +52,7 @@
 	u16		num_uf_bufs;	/*  unsolicited recv buffers	*/
 	u8		num_cqs;
 	u8		fw_tick_res;	/*  FW clock resolution in ms */
-	u8		rsvd[2];
+	u8		rsvd[6];
 };
 #pragma pack()
 
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index d4f951f..5d2a130 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5717,6 +5717,8 @@
 
 	if (vport_drv->comp_del)
 		complete(vport_drv->comp_del);
+	else
+		kfree(vport_drv);
 
 	bfa_lps_delete(vport->lps);
 }
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 52628d5..fe0463a 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -2169,7 +2169,10 @@
 	 * - MAX receive frame size
 	 */
 	rport->cisc = plogi->csp.cisc;
-	rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+	if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
+		rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+	else
+		rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);
 
 	bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
 	bfa_trc(port->fcs, port->fabric->bb_credit);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index eca7ab7..14e6284 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -88,7 +88,6 @@
 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
 static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
 				enum bfa_ioc_event_e event);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
@@ -97,7 +96,6 @@
 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
-
 /*
  * IOC state machine definitions/declarations
  */
@@ -114,7 +112,6 @@
 	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
 	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
 	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
-	IOC_E_FWRSP_ACQ_ADDR	= 13,	/*  Acquiring address		*/
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -127,7 +124,6 @@
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
 
 static struct bfa_sm_table_s ioc_sm_table[] = {
 	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -140,7 +136,6 @@
 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
 	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
-	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
 };
 
 /*
@@ -371,17 +366,9 @@
 	switch (event) {
 	case IOC_E_FWRSP_GETATTR:
 		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_check_attr_wwns(ioc);
-		bfa_ioc_hb_monitor(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 		break;
 
-	case IOC_E_FWRSP_ACQ_ADDR:
-		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_hb_monitor(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
-		break;
-
 	case IOC_E_PFFAILED:
 	case IOC_E_HWERROR:
 		bfa_ioc_timer_stop(ioc);
@@ -406,51 +393,6 @@
 	}
 }
 
-/*
- * Acquiring address from fabric (entry function)
- */
-static void
-bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
-{
-}
-
-/*
- *	Acquiring address from the fabric
- */
-static void
-bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
-	bfa_trc(ioc, event);
-
-	switch (event) {
-	case IOC_E_FWRSP_GETATTR:
-		bfa_ioc_check_attr_wwns(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
-		break;
-
-	case IOC_E_PFFAILED:
-	case IOC_E_HWERROR:
-		bfa_hb_timer_stop(ioc);
-	case IOC_E_HBFAIL:
-		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
-		if (event != IOC_E_PFFAILED)
-			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_hb_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
-		break;
-
-	case IOC_E_ENABLE:
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
 static void
 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
 {
@@ -458,6 +400,7 @@
 
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+	bfa_ioc_hb_monitor(ioc);
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
 }
@@ -738,26 +681,60 @@
 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 {
 	struct bfi_ioc_image_hdr_s	fwhdr;
-	u32	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+	u32	r32, fwstate, pgnum, pgoff, loff = 0;
+	int	i;
+
+	/*
+	 * Spin on init semaphore to serialize.
+	 */
+	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+	while (r32 & 0x1) {
+		udelay(20);
+		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+	}
 
 	/* h/w sem init */
-	if (fwstate == BFI_IOC_UNINIT)
+	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+	if (fwstate == BFI_IOC_UNINIT) {
+		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 		goto sem_get;
+	}
 
 	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
 
-	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 		goto sem_get;
-
-	bfa_trc(iocpf->ioc, fwstate);
-	bfa_trc(iocpf->ioc, fwhdr.exec);
-	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+	}
 
 	/*
-	 * Try to lock and then unlock the semaphore.
+	 * Clear fwver hdr
+	 */
+	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
+
+	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
+		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
+		loff += sizeof(u32);
+	}
+
+	bfa_trc(iocpf->ioc, fwstate);
+	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
+	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
+
+	/*
+	 * Unlock the hw semaphore. Should be here only once per boot.
 	 */
 	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
 	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+
+	/*
+	 * unlock init semaphore.
+	 */
+	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+
 sem_get:
 	bfa_ioc_hw_sem_get(iocpf->ioc);
 }
@@ -1707,11 +1684,6 @@
 	u32 i;
 	u32 asicmode;
 
-	/*
-	 * Initialize LMEM first before code download
-	 */
-	bfa_ioc_lmem_init(ioc);
-
 	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
 	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
@@ -1999,6 +1971,12 @@
 	bfa_ioc_pll_init_asic(ioc);
 
 	ioc->pllinit = BFA_TRUE;
+
+	/*
+	 * Initialize LMEM
+	 */
+	bfa_ioc_lmem_init(ioc);
+
 	/*
 	 *  release semaphore.
 	 */
@@ -2122,10 +2100,6 @@
 		bfa_ioc_getattr_reply(ioc);
 		break;
 
-	case BFI_IOC_I2H_ACQ_ADDR_REPLY:
-		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
-		break;
-
 	default:
 		bfa_trc(ioc, msg->mh.msg_id);
 		WARN_ON(1);
@@ -2416,15 +2390,6 @@
 }
 
 /*
- * Return TRUE if IOC is in acquiring address state
- */
-bfa_boolean_t
-bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
-{
-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
-}
-
-/*
  * return true if IOC firmware is different.
  */
 bfa_boolean_t
@@ -2916,17 +2881,6 @@
 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
-static void
-bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
-{
-	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
-		return;
-	if (ioc->attr->nwwn == 0)
-		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
-	if (ioc->attr->pwwn == 0)
-		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
-}
-
 /*
  *  BFA IOC PF private functions
  */
@@ -4495,7 +4449,7 @@
  */
 
 #define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
-#define BFA_DIAG_FWPING_TOV	1000	/* msec */
+#define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
 
 /* IOC event handler */
 static void
@@ -4772,7 +4726,7 @@
 }
 
 static void
-diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
 {
 	bfa_trc(diag, diag->ledtest.lock);
 	diag->ledtest.lock = BFA_FALSE;
@@ -4850,6 +4804,8 @@
 		u32 pattern, struct bfa_diag_memtest_result *result,
 		bfa_cb_diag_t cbfn, void *cbarg)
 {
+	u32	memtest_tov;
+
 	bfa_trc(diag, pattern);
 
 	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
@@ -4869,8 +4825,10 @@
 	/* download memtest code and take LPU0 out of reset */
 	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
 
+	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
+		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
 	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
-			bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
+			bfa_diag_memtest_done, diag, memtest_tov);
 	diag->timer_active = 1;
 	return BFA_STATUS_OK;
 }
@@ -5641,24 +5599,27 @@
 	case BFA_DCONF_SM_INIT:
 		if (dconf->min_cfg) {
 			bfa_trc(dconf->bfa, dconf->min_cfg);
+			bfa_fsm_send_event(&dconf->bfa->iocfc,
+					IOCFC_E_DCONF_DONE);
 			return;
 		}
 		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
-		dconf->flashdone = BFA_FALSE;
-		bfa_trc(dconf->bfa, dconf->flashdone);
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
 		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
 					BFA_FLASH_PART_DRV, dconf->instance,
 					dconf->dconf,
 					sizeof(struct bfa_dconf_s), 0,
 					bfa_dconf_init_cb, dconf->bfa);
 		if (bfa_status != BFA_STATUS_OK) {
+			bfa_timer_stop(&dconf->timer);
 			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
 			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
 			return;
 		}
 		break;
 	case BFA_DCONF_SM_EXIT:
-		dconf->flashdone = BFA_TRUE;
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
 	case BFA_DCONF_SM_IOCDISABLE:
 	case BFA_DCONF_SM_WR:
 	case BFA_DCONF_SM_FLASH_COMP:
@@ -5679,15 +5640,20 @@
 
 	switch (event) {
 	case BFA_DCONF_SM_FLASH_COMP:
+		bfa_timer_stop(&dconf->timer);
 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
 		break;
 	case BFA_DCONF_SM_TIMEOUT:
 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
 		break;
 	case BFA_DCONF_SM_EXIT:
-		dconf->flashdone = BFA_TRUE;
-		bfa_trc(dconf->bfa, dconf->flashdone);
+		bfa_timer_stop(&dconf->timer);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+		break;
 	case BFA_DCONF_SM_IOCDISABLE:
+		bfa_timer_stop(&dconf->timer);
 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
 		break;
 	default:
@@ -5710,9 +5676,8 @@
 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
 		break;
 	case BFA_DCONF_SM_EXIT:
-		dconf->flashdone = BFA_TRUE;
-		bfa_trc(dconf->bfa, dconf->flashdone);
 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
 		break;
 	case BFA_DCONF_SM_INIT:
 	case BFA_DCONF_SM_IOCDISABLE:
@@ -5774,9 +5739,7 @@
 		bfa_timer_stop(&dconf->timer);
 	case BFA_DCONF_SM_TIMEOUT:
 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
-		dconf->flashdone = BFA_TRUE;
-		bfa_trc(dconf->bfa, dconf->flashdone);
-		bfa_ioc_disable(&dconf->bfa->ioc);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
 		break;
 	default:
 		bfa_sm_fault(dconf->bfa, event);
@@ -5823,8 +5786,8 @@
 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
 		break;
 	case BFA_DCONF_SM_EXIT:
-		dconf->flashdone = BFA_TRUE;
 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
 		break;
 	case BFA_DCONF_SM_IOCDISABLE:
 		break;
@@ -5865,11 +5828,6 @@
 	if (cfg->drvcfg.min_cfg) {
 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
 		dconf->min_cfg = BFA_TRUE;
-		/*
-		 * Set the flashdone flag to TRUE explicitly as no flash
-		 * write will happen in min_cfg mode.
-		 */
-		dconf->flashdone = BFA_TRUE;
 	} else {
 		dconf->min_cfg = BFA_FALSE;
 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
@@ -5885,9 +5843,7 @@
 	struct bfa_s *bfa = arg;
 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
 
-	dconf->flashdone = BFA_TRUE;
-	bfa_trc(bfa, dconf->flashdone);
-	bfa_iocfc_cb_dconf_modinit(bfa, status);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
 	if (status == BFA_STATUS_OK) {
 		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
 		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5895,7 +5851,7 @@
 		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
 			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
 	}
-	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
 }
 
 void
@@ -5977,7 +5933,5 @@
 bfa_dconf_modexit(struct bfa_s *bfa)
 {
 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
-	BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
-	bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
 	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
 }
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 546d46b..1a99d4b 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -373,6 +373,22 @@
 };
 
 /*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+	IOCFC_E_INIT		= 1,	/* IOCFC init request		*/
+	IOCFC_E_START		= 2,	/* IOCFC mod start request	*/
+	IOCFC_E_STOP		= 3,	/* IOCFC stop request		*/
+	IOCFC_E_ENABLE		= 4,	/* IOCFC enable request		*/
+	IOCFC_E_DISABLE		= 5,	/* IOCFC disable request	*/
+	IOCFC_E_IOC_ENABLED	= 6,	/* IOC enabled message		*/
+	IOCFC_E_IOC_DISABLED	= 7,	/* IOC disabled message		*/
+	IOCFC_E_IOC_FAILED	= 8,	/* failure notice by IOC sm	*/
+	IOCFC_E_DCONF_DONE	= 9,	/* dconf read/write done	*/
+	IOCFC_E_CFG_DONE	= 10,	/* IOCFC config complete	*/
+};
+
+/*
  * ASIC block configurtion related
  */
 
@@ -706,7 +722,6 @@
 struct bfa_dconf_mod_s {
 	bfa_sm_t		sm;
 	u8			instance;
-	bfa_boolean_t		flashdone;
 	bfa_boolean_t		read_data_valid;
 	bfa_boolean_t		min_cfg;
 	struct bfa_timer_s	timer;
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index d1b8f0c..2eb0c6a 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -786,17 +786,73 @@
 }
 
 #define CT2_NFC_MAX_DELAY	1000
+#define CT2_NFC_VER_VALID	0x143
+#define BFA_IOC_PLL_POLL	1000000
+
+static bfa_boolean_t
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+	u32	r32;
+
+	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+	if (r32 & __NFC_CONTROLLER_HALTED)
+		return BFA_TRUE;
+
+	return BFA_FALSE;
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+	u32	r32;
+	int i;
+
+	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+		if (!(r32 & __NFC_CONTROLLER_HALTED))
+			return;
+		udelay(1000);
+	}
+	WARN_ON(1);
+}
+
 bfa_status_t
 bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 {
-	u32	wgn, r32;
-	int i;
+	u32 wgn, r32, nfc_ver, i;
 
-	/*
-	 * Initialize PLL if not already done by NFC
-	 */
 	wgn = readl(rb + CT2_WGN_STATUS);
-	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
+	    (nfc_ver >= CT2_NFC_VER_VALID)) {
+		if (bfa_ioc_ct2_nfc_halted(rb))
+			bfa_ioc_ct2_nfc_resume(rb);
+
+		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
+		       rb + CT2_CSI_FW_CTL_SET_REG);
+
+		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
+				break;
+		}
+
+		WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+
+		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
+				break;
+		}
+
+		WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+		udelay(1000);
+
+		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+		WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+	} else {
 		writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
 		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
 			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -804,6 +860,34 @@
 				break;
 			udelay(1000);
 		}
+
+		bfa_ioc_ct2_mac_reset(rb);
+		bfa_ioc_ct2_sclk_init(rb);
+		bfa_ioc_ct2_lclk_init(rb);
+
+		/*
+		 * release soft reset on s_clk & l_clk
+		 */
+		r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
+		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+		       (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+		/*
+		 * release soft reset on s_clk & l_clk
+		 */
+		r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+		      (rb + CT2_APP_PLL_LCLK_CTL_REG));
+	}
+
+	/*
+	 * Announce flash device presence, if flash was corrupted.
+	 */
+	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+		r32 = readl(rb + PSS_GPIO_OUT_REG);
+		writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
+		r32 = readl(rb + PSS_GPIO_OE_REG);
+		writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
 	}
 
 	/*
@@ -813,48 +897,25 @@
 	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
 	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
 
-	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-	if (r32 == 1) {
-		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
-		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-	}
-	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-	if (r32 == 1) {
-		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
-		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-	}
-
-	bfa_ioc_ct2_mac_reset(rb);
-	bfa_ioc_ct2_sclk_init(rb);
-	bfa_ioc_ct2_lclk_init(rb);
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
-	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
-		(rb + CT2_APP_PLL_SCLK_CTL_REG));
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
-		(rb + CT2_APP_PLL_LCLK_CTL_REG));
-
-	/*
-	 * Announce flash device presence, if flash was corrupted.
-	 */
-	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
-		r32 = readl((rb + PSS_GPIO_OUT_REG));
-		writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
-		r32 = readl((rb + PSS_GPIO_OE_REG));
-		writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+	/* For first time initialization, no need to clear interrupts */
+	r32 = readl(rb + HOST_SEM5_REG);
+	if (r32 & 0x1) {
+		r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
+		if (r32 == 1) {
+			writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		}
+		r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+		if (r32 == 1) {
+			writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
+			readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+		}
 	}
 
 	bfa_ioc_ct2_mem_init(rb);
 
-	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
-	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
+	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
+
 	return BFA_STATUS_OK;
 }
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index aa8a0ea..2e856e6 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -1280,6 +1280,7 @@
 	switch (event) {
 	case BFA_LPS_SM_RESUME:
 		bfa_sm_set_state(lps, bfa_lps_sm_login);
+		bfa_lps_send_login(lps);
 		break;
 
 	case BFA_LPS_SM_OFFLINE:
@@ -1578,7 +1579,7 @@
 		break;
 
 	case BFA_STATUS_VPORT_MAX:
-		if (!rsp->ext_status)
+		if (rsp->ext_status)
 			bfa_lps_no_res(lps, rsp->ext_status);
 		break;
 
@@ -3084,33 +3085,6 @@
 }
 
 static void
-bfa_fcport_send_txcredit(void *port_cbarg)
-{
-
-	struct bfa_fcport_s *fcport = port_cbarg;
-	struct bfi_fcport_set_svc_params_req_s *m;
-
-	/*
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
-	if (!m) {
-		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
-		return;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
-			bfa_fn_lpu(fcport->bfa));
-	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
-	m->bb_scn = fcport->cfg.bb_scn;
-
-	/*
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
-}
-
-static void
 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
 	struct bfa_qos_stats_s *s)
 {
@@ -3602,26 +3576,24 @@
 		return BFA_STATUS_UNSUPP_SPEED;
 	}
 
-	/* For Mezz card, port speed entered needs to be checked */
-	if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
-		if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
-			/* For CT2, 1G is not supported */
-			if ((speed == BFA_PORT_SPEED_1GBPS) &&
-			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
-				return BFA_STATUS_UNSUPP_SPEED;
+	/* Port speed entered needs to be checked */
+	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+		/* For CT2, 1G is not supported */
+		if ((speed == BFA_PORT_SPEED_1GBPS) &&
+		    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+			return BFA_STATUS_UNSUPP_SPEED;
 
-			/* Already checked for Auto Speed and Max Speed supp */
-			if (!(speed == BFA_PORT_SPEED_1GBPS ||
-			      speed == BFA_PORT_SPEED_2GBPS ||
-			      speed == BFA_PORT_SPEED_4GBPS ||
-			      speed == BFA_PORT_SPEED_8GBPS ||
-			      speed == BFA_PORT_SPEED_16GBPS ||
-			      speed == BFA_PORT_SPEED_AUTO))
-				return BFA_STATUS_UNSUPP_SPEED;
-		} else {
-			if (speed != BFA_PORT_SPEED_10GBPS)
-				return BFA_STATUS_UNSUPP_SPEED;
-		}
+		/* Already checked for Auto Speed and Max Speed supp */
+		if (!(speed == BFA_PORT_SPEED_1GBPS ||
+		      speed == BFA_PORT_SPEED_2GBPS ||
+		      speed == BFA_PORT_SPEED_4GBPS ||
+		      speed == BFA_PORT_SPEED_8GBPS ||
+		      speed == BFA_PORT_SPEED_16GBPS ||
+		      speed == BFA_PORT_SPEED_AUTO))
+			return BFA_STATUS_UNSUPP_SPEED;
+	} else {
+		if (speed != BFA_PORT_SPEED_10GBPS)
+			return BFA_STATUS_UNSUPP_SPEED;
 	}
 
 	fcport->cfg.speed = speed;
@@ -3765,7 +3737,6 @@
 	fcport->cfg.bb_scn = bb_scn;
 	if (bb_scn)
 		fcport->bbsc_op_state = BFA_TRUE;
-	bfa_fcport_send_txcredit(fcport);
 }
 
 /*
@@ -3825,8 +3796,6 @@
 			attr->port_state = BFA_PORT_ST_IOCDIS;
 		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
 			attr->port_state = BFA_PORT_ST_FWMISMATCH;
-		else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
-			attr->port_state = BFA_PORT_ST_ACQ_ADDR;
 	}
 
 	/* FCoE vlan */
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index b52cbb6..f300675 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -663,10 +663,6 @@
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
 /* FAA specific APIs */
-bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
-			bfa_cb_iocfc_t cbfn, void *cbarg);
-bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
-			bfa_cb_iocfc_t cbfn, void *cbarg);
 bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
 			bfa_cb_iocfc_t cbfn, void *cbarg);
 
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 1938fe0..7b1ecd2 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -442,6 +442,43 @@
 	return status;
 }
 
+int
+bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+	uint32_t status;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	status = bfa_port_disable(&bfad->bfa.modules.port,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (status != BFA_STATUS_OK)
+		return -EIO;
+
+	wait_for_completion(&fcomp.comp);
+	if (fcomp.status != BFA_STATUS_OK)
+		return -EIO;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	status = bfa_port_enable(&bfad->bfa.modules.port,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (status != BFA_STATUS_OK)
+		return -EIO;
+
+	wait_for_completion(&fcomp.comp);
+	if (fcomp.status != BFA_STATUS_OK)
+		return -EIO;
+
+	return 0;
+}
+
 static int
 bfad_im_vport_delete(struct fc_vport *fc_vport)
 {
@@ -457,8 +494,11 @@
 	unsigned long flags;
 	struct completion fcomp;
 
-	if (im_port->flags & BFAD_PORT_DELETE)
-		goto free_scsi_host;
+	if (im_port->flags & BFAD_PORT_DELETE) {
+		bfad_scsi_host_free(bfad, im_port);
+		list_del(&vport->list_entry);
+		return 0;
+	}
 
 	port = im_port->port;
 
@@ -489,7 +529,6 @@
 
 	wait_for_completion(vport->comp_del);
 
-free_scsi_host:
 	bfad_scsi_host_free(bfad, im_port);
 	list_del(&vport->list_entry);
 	kfree(vport);
@@ -579,7 +618,7 @@
 	.show_rport_dev_loss_tmo = 1,
 	.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
 	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
-
+	.issue_fc_host_lip = bfad_im_issue_fc_host_lip,
 	.vport_create = bfad_im_vport_create,
 	.vport_delete = bfad_im_vport_delete,
 	.vport_disable = bfad_im_vport_disable,
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 8005c6c..e1f4b10 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -1288,50 +1288,6 @@
 }
 
 int
-bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
-{
-	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
-	unsigned long   flags;
-	struct bfad_hal_comp    fcomp;
-
-	init_completion(&fcomp.comp);
-	iocmd->status = BFA_STATUS_OK;
-	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
-	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
-	if (iocmd->status != BFA_STATUS_OK)
-		goto out;
-
-	wait_for_completion(&fcomp.comp);
-	iocmd->status = fcomp.status;
-out:
-	return 0;
-}
-
-int
-bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
-{
-	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
-	unsigned long   flags;
-	struct bfad_hal_comp    fcomp;
-
-	init_completion(&fcomp.comp);
-	iocmd->status = BFA_STATUS_OK;
-	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
-	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
-	if (iocmd->status != BFA_STATUS_OK)
-		goto out;
-
-	wait_for_completion(&fcomp.comp);
-	iocmd->status = fcomp.status;
-out:
-	return 0;
-}
-
-int
 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
@@ -1918,6 +1874,7 @@
 	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
 	void	*iocmd_bufptr;
 	unsigned long	flags;
+	u32 offset;
 
 	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
 			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
@@ -1935,8 +1892,10 @@
 
 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	offset = iocmd->offset;
 	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
-				(u32 *)&iocmd->offset, &iocmd->bufsz);
+				&offset, &iocmd->bufsz);
+	iocmd->offset = offset;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 out:
 	return 0;
@@ -2633,12 +2592,6 @@
 	case IOCMD_FLASH_DISABLE_OPTROM:
 		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
 		break;
-	case IOCMD_FAA_ENABLE:
-		rc = bfad_iocmd_faa_enable(bfad, iocmd);
-		break;
-	case IOCMD_FAA_DISABLE:
-		rc = bfad_iocmd_faa_disable(bfad, iocmd);
-		break;
 	case IOCMD_FAA_QUERY:
 		rc = bfad_iocmd_faa_query(bfad, iocmd);
 		break;
@@ -2809,9 +2762,16 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) job->shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
+	struct request_queue *request_q = job->req->q;
 	void *payload_kbuf;
 	int rc = -EINVAL;
 
+	/*
+	 * Set the BSG device request_queue size to 256 to support
+	 * payloads larger than 512*1024K bytes.
+	 */
+	blk_queue_max_segments(request_q, 256);
+
 	/* Allocate a temp buffer to hold the passed in user space command */
 	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
 	if (!payload_kbuf) {
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index e859adb..17ad672 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -83,8 +83,6 @@
 	IOCMD_PORT_CFG_MODE,
 	IOCMD_FLASH_ENABLE_OPTROM,
 	IOCMD_FLASH_DISABLE_OPTROM,
-	IOCMD_FAA_ENABLE,
-	IOCMD_FAA_DISABLE,
 	IOCMD_FAA_QUERY,
 	IOCMD_CEE_GET_ATTR,
 	IOCMD_CEE_GET_STATS,
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index dc5b9d9..7f74f1d 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.0.2.2"
+#define BFAD_DRIVER_VERSION    "3.0.23.0"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 0d9f1fb..d4220e1 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -28,17 +28,15 @@
 	BFI_IOCFC_H2I_CFG_REQ		= 1,
 	BFI_IOCFC_H2I_SET_INTR_REQ	= 2,
 	BFI_IOCFC_H2I_UPDATEQ_REQ	= 3,
-	BFI_IOCFC_H2I_FAA_ENABLE_REQ	= 4,
-	BFI_IOCFC_H2I_FAA_DISABLE_REQ	= 5,
-	BFI_IOCFC_H2I_FAA_QUERY_REQ	= 6,
+	BFI_IOCFC_H2I_FAA_QUERY_REQ	= 4,
+	BFI_IOCFC_H2I_ADDR_REQ		= 5,
 };
 
 enum bfi_iocfc_i2h_msgs {
 	BFI_IOCFC_I2H_CFG_REPLY		= BFA_I2HM(1),
 	BFI_IOCFC_I2H_UPDATEQ_RSP	= BFA_I2HM(3),
-	BFI_IOCFC_I2H_FAA_ENABLE_RSP	= BFA_I2HM(4),
-	BFI_IOCFC_I2H_FAA_DISABLE_RSP	= BFA_I2HM(5),
-	BFI_IOCFC_I2H_FAA_QUERY_RSP	= BFA_I2HM(6),
+	BFI_IOCFC_I2H_FAA_QUERY_RSP	= BFA_I2HM(4),
+	BFI_IOCFC_I2H_ADDR_MSG		= BFA_I2HM(5),
 };
 
 struct bfi_iocfc_cfg_s {
@@ -184,6 +182,13 @@
 	struct bfi_mhdr_s mh;	/* common msg header    */
 };
 
+struct bfi_faa_addr_msg_s {
+	struct  bfi_mhdr_s mh;	/* common msg header	*/
+	u8	rsvd[4];
+	wwn_t	pwwn;		/* Fabric acquired PWWN	*/
+	wwn_t	nwwn;		/* Fabric acquired NWWN	*/
+};
+
 /*
  * BFI_IOCFC_H2I_FAA_QUERY_REQ message
  */
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index d892064..ed5f159 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -335,11 +335,17 @@
 #define __PMM_1T_PNDB_P			0x00000002
 #define CT2_PMM_1T_CONTROL_REG_P1	0x00023c1c
 #define CT2_WGN_STATUS			0x00014990
+#define __A2T_AHB_LOAD			0x00000800
 #define __WGN_READY			0x00000400
 #define __GLBL_PF_VF_CFG_RDY		0x00000200
+#define CT2_NFC_CSR_CLR_REG		0x00027420
 #define CT2_NFC_CSR_SET_REG		0x00027424
 #define __HALT_NFC_CONTROLLER		0x00000002
 #define __NFC_CONTROLLER_HALTED		0x00001000
+#define CT2_RSC_GPR15_REG		0x0002765c
+#define CT2_CSI_FW_CTL_REG		0x00027080
+#define CT2_CSI_FW_CTL_SET_REG		0x00027088
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
 
 #define CT2_CSI_MAC0_CONTROL_REG	0x000270d0
 #define __CSI_MAC_RESET			0x00000010
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index abd72a0..c1c6a92 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -439,13 +439,13 @@
 	fr->fr_dev = lport;
 
 	bg = &bnx2fc_global;
-	spin_lock_bh(&bg->fcoe_rx_list.lock);
+	spin_lock(&bg->fcoe_rx_list.lock);
 
 	__skb_queue_tail(&bg->fcoe_rx_list, skb);
 	if (bg->fcoe_rx_list.qlen == 1)
 		wake_up_process(bg->thread);
 
-	spin_unlock_bh(&bg->fcoe_rx_list.lock);
+	spin_unlock(&bg->fcoe_rx_list.lock);
 
 	return 0;
 err:
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ae7d15c..335e851 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1436,7 +1436,7 @@
 		goto err;
 
 	fps = &per_cpu(fcoe_percpu, cpu);
-	spin_lock_bh(&fps->fcoe_rx_list.lock);
+	spin_lock(&fps->fcoe_rx_list.lock);
 	if (unlikely(!fps->thread)) {
 		/*
 		 * The targeted CPU is not ready, let's target
@@ -1447,12 +1447,12 @@
 				"ready for incoming skb- using first online "
 				"CPU.\n");
 
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
+		spin_unlock(&fps->fcoe_rx_list.lock);
 		cpu = cpumask_first(cpu_online_mask);
 		fps = &per_cpu(fcoe_percpu, cpu);
-		spin_lock_bh(&fps->fcoe_rx_list.lock);
+		spin_lock(&fps->fcoe_rx_list.lock);
 		if (!fps->thread) {
-			spin_unlock_bh(&fps->fcoe_rx_list.lock);
+			spin_unlock(&fps->fcoe_rx_list.lock);
 			goto err;
 		}
 	}
@@ -1463,24 +1463,17 @@
 	 * so we're free to queue skbs into its queue.
 	 */
 
-	/* If this is a SCSI-FCP frame, and this is already executing on the
-	 * correct CPU, and the queue for this CPU is empty, then go ahead
-	 * and process the frame directly in the softirq context.
-	 * This lets us process completions without context switching from the
-	 * NET_RX softirq, to our receive processing thread, and then back to
-	 * BLOCK softirq context.
+	/*
+	 * Note: We used to have a set of conditions under which we would
+	 * call fcoe_recv_frame directly, rather than queuing to the rx list
+	 * as it could save a few cycles, but doing so is prohibited, as
+	 * fcoe_recv_frame has several paths that may sleep, which is forbidden
+	 * in softirq context.
 	 */
-	if (fh->fh_type == FC_TYPE_FCP &&
-	    cpu == smp_processor_id() &&
-	    skb_queue_empty(&fps->fcoe_rx_list)) {
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-		fcoe_recv_frame(skb);
-	} else {
-		__skb_queue_tail(&fps->fcoe_rx_list, skb);
-		if (fps->fcoe_rx_list.qlen == 1)
-			wake_up_process(fps->thread);
-		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-	}
+	__skb_queue_tail(&fps->fcoe_rx_list, skb);
+	if (fps->thread->state == TASK_INTERRUPTIBLE)
+		wake_up_process(fps->thread);
+	spin_unlock(&fps->fcoe_rx_list.lock);
 
 	return 0;
 err:
@@ -1797,23 +1790,29 @@
 {
 	struct fcoe_percpu_s *p = arg;
 	struct sk_buff *skb;
+	struct sk_buff_head tmp;
+
+	skb_queue_head_init(&tmp);
 
 	set_user_nice(current, -20);
 
 	while (!kthread_should_stop()) {
 
 		spin_lock_bh(&p->fcoe_rx_list.lock);
-		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+		while ((skb = __skb_dequeue(&tmp)) != NULL)
+			fcoe_recv_frame(skb);
+
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		if (!skb_queue_len(&p->fcoe_rx_list)) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_bh(&p->fcoe_rx_list.lock);
 			schedule();
 			set_current_state(TASK_RUNNING);
-			if (kthread_should_stop())
-				return 0;
-			spin_lock_bh(&p->fcoe_rx_list.lock);
-		}
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-		fcoe_recv_frame(skb);
+		} else
+			spin_unlock_bh(&p->fcoe_rx_list.lock);
 	}
 	return 0;
 }
@@ -2187,8 +2186,12 @@
 	/* start FIP Discovery and FLOGI */
 	lport->boot_time = jiffies;
 	fc_fabric_login(lport);
-	if (!fcoe_link_ok(lport))
+	if (!fcoe_link_ok(lport)) {
+		rtnl_unlock();
 		fcoe_ctlr_link_up(&fcoe->ctlr);
+		mutex_unlock(&fcoe_config_mutex);
+		return rc;
+	}
 
 out_nodev:
 	rtnl_unlock();
@@ -2261,31 +2264,14 @@
 static void fcoe_percpu_clean(struct fc_lport *lport)
 {
 	struct fcoe_percpu_s *pp;
-	struct fcoe_rcv_info *fr;
-	struct sk_buff_head *list;
-	struct sk_buff *skb, *next;
-	struct sk_buff *head;
+	struct sk_buff *skb;
 	unsigned int cpu;
 
 	for_each_possible_cpu(cpu) {
 		pp = &per_cpu(fcoe_percpu, cpu);
-		spin_lock_bh(&pp->fcoe_rx_list.lock);
-		list = &pp->fcoe_rx_list;
-		head = list->next;
-		for (skb = head; skb != (struct sk_buff *)list;
-		     skb = next) {
-			next = skb->next;
-			fr = fcoe_dev_from_skb(skb);
-			if (fr->fr_dev == lport) {
-				__skb_unlink(skb, list);
-				kfree_skb(skb);
-			}
-		}
 
-		if (!pp->thread || !cpu_online(cpu)) {
-			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		if (!pp->thread || !cpu_online(cpu))
 			continue;
-		}
 
 		skb = dev_alloc_skb(0);
 		if (!skb) {
@@ -2294,6 +2280,7 @@
 		}
 		skb->destructor = fcoe_percpu_flush_done;
 
+		spin_lock_bh(&pp->fcoe_rx_list.lock);
 		__skb_queue_tail(&pp->fcoe_rx_list, skb);
 		if (pp->fcoe_rx_list.qlen == 1)
 			wake_up_process(pp->thread);
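Editorial note on the fcoe.c hunks above: the receive path no longer calls fcoe_recv_frame() from softirq context (it has paths that may sleep), and the per-CPU receive thread now splices the entire rx list onto a local queue before processing, so frames are handled without the list lock held. Below is a minimal, self-contained sketch of that splice-and-drain pattern; it uses a plain linked list and pthreads, and every name in it (item, shared_head, drain_once, handle_item) is an illustrative assumption, not a kernel identifier, and it is not part of the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int payload;
};

static struct item *shared_head;	/* producers append here under the lock */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

/* May block; therefore it must never run with shared_lock held. */
static void handle_item(struct item *it)
{
	printf("handled %d\n", it->payload);
	free(it);
}

/* Grab the whole pending list in one shot, then process it unlocked. */
static void drain_once(void)
{
	struct item *local, *next;

	pthread_mutex_lock(&shared_lock);
	local = shared_head;		/* splice: take everything at once */
	shared_head = NULL;
	pthread_mutex_unlock(&shared_lock);

	for (; local; local = next) {
		next = local->next;
		handle_item(local);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			break;
		it->payload = i;
		pthread_mutex_lock(&shared_lock);
		it->next = shared_head;	/* LIFO for brevity */
		shared_head = it;
		pthread_mutex_unlock(&shared_lock);
	}
	drain_once();
	return 0;
}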
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index e7522dc..249a106 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -242,7 +242,7 @@
 		printk(KERN_INFO "libfcoe: host%d: FIP selected "
 		       "Fibre-Channel Forwarder MAC %pM\n",
 		       fip->lp->host->host_no, sel->fcf_mac);
-		memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+		memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
 		fip->map_dest = 0;
 	}
 unlock:
@@ -824,6 +824,7 @@
 			memcpy(fcf->fcf_mac,
 			       ((struct fip_mac_desc *)desc)->fd_mac,
 			       ETH_ALEN);
+			memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN);
 			if (!is_valid_ether_addr(fcf->fcf_mac)) {
 				LIBFCOE_FIP_DBG(fip,
 					"Invalid MAC addr %pM in FIP adv\n",
@@ -1013,6 +1014,7 @@
 	struct fip_desc *desc;
 	struct fip_encaps *els;
 	struct fcoe_dev_stats *stats;
+	struct fcoe_fcf *sel;
 	enum fip_desc_type els_dtype = 0;
 	u8 els_op;
 	u8 sub;
@@ -1040,7 +1042,8 @@
 			goto drop;
 		/* Drop ELS if there are duplicate critical descriptors */
 		if (desc->fip_dtype < 32) {
-			if (desc_mask & 1U << desc->fip_dtype) {
+			if ((desc->fip_dtype != FIP_DT_MAC) &&
+			    (desc_mask & 1U << desc->fip_dtype)) {
 				LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
 						"Descriptors in FIP ELS\n");
 				goto drop;
@@ -1049,17 +1052,32 @@
 		}
 		switch (desc->fip_dtype) {
 		case FIP_DT_MAC:
+			sel = fip->sel_fcf;
 			if (desc_cnt == 1) {
 				LIBFCOE_FIP_DBG(fip, "FIP descriptors "
 						"received out of order\n");
 				goto drop;
 			}
+			/*
+			 * Some switch implementations send two MAC descriptors,
+			 * with the first MAC (granted_mac) being the FPMA and the
+			 * second one (fcoe_mac) used as the destination address
+			 * for sending/receiving FCoE packets. FIP traffic is
+			 * sent using fip_mac. For regular switches, both
+			 * fip_mac and fcoe_mac would be the same.
+			 */
+			if (desc_cnt == 2)
+				memcpy(granted_mac,
+				       ((struct fip_mac_desc *)desc)->fd_mac,
+				       ETH_ALEN);
 
 			if (dlen != sizeof(struct fip_mac_desc))
 				goto len_err;
-			memcpy(granted_mac,
-			       ((struct fip_mac_desc *)desc)->fd_mac,
-			       ETH_ALEN);
+
+			if ((desc_cnt == 3) && (sel))
+				memcpy(sel->fcoe_mac,
+				       ((struct fip_mac_desc *)desc)->fd_mac,
+				       ETH_ALEN);
 			break;
 		case FIP_DT_FLOGI:
 		case FIP_DT_FDISC:
@@ -1273,11 +1291,6 @@
 		 * No Vx_Port description. Clear all NPIV ports,
 		 * followed by physical port
 		 */
-		mutex_lock(&lport->lp_mutex);
-		list_for_each_entry(vn_port, &lport->vports, list)
-			fc_lport_reset(vn_port);
-		mutex_unlock(&lport->lp_mutex);
-
 		mutex_lock(&fip->ctlr_mutex);
 		per_cpu_ptr(lport->dev_stats,
 			    get_cpu())->VLinkFailureCount++;
@@ -1285,6 +1298,11 @@
 		fcoe_ctlr_reset(fip);
 		mutex_unlock(&fip->ctlr_mutex);
 
+		mutex_lock(&lport->lp_mutex);
+		list_for_each_entry(vn_port, &lport->vports, list)
+			fc_lport_reset(vn_port);
+		mutex_unlock(&lport->lp_mutex);
+
 		fc_lport_reset(fip->lp);
 		fcoe_ctlr_solicit(fip, NULL);
 	} else {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cdfe5a1..e002cd4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -104,7 +104,9 @@
 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
 		.mailbox = 0x0042C,
+		.max_cmds = 100,
 		.cache_line_size = 0x20,
+		.clear_isr = 1,
 		{
 			.set_interrupt_mask_reg = 0x0022C,
 			.clr_interrupt_mask_reg = 0x00230,
@@ -126,7 +128,9 @@
 	},
 	{ /* Snipe and Scamp */
 		.mailbox = 0x0052C,
+		.max_cmds = 100,
 		.cache_line_size = 0x20,
+		.clear_isr = 1,
 		{
 			.set_interrupt_mask_reg = 0x00288,
 			.clr_interrupt_mask_reg = 0x0028C,
@@ -148,7 +152,9 @@
 	},
 	{ /* CRoC */
 		.mailbox = 0x00044,
+		.max_cmds = 1000,
 		.cache_line_size = 0x20,
+		.clear_isr = 0,
 		{
 			.set_interrupt_mask_reg = 0x00010,
 			.clr_interrupt_mask_reg = 0x00018,
@@ -847,8 +853,6 @@
 
 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 
-	mb();
-
 	ipr_send_command(ipr_cmd);
 }
 
@@ -982,8 +986,6 @@
 
 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
 
-		mb();
-
 		ipr_send_command(ipr_cmd);
 	} else {
 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -4339,8 +4341,7 @@
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 		if ((res->bus == starget->channel) &&
-		    (res->target == starget->id) &&
-		    (res->lun == 0)) {
+		    (res->target == starget->id)) {
 			return res;
 		}
 	}
@@ -4414,12 +4415,14 @@
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
 
 	if (ioa_cfg->sis64) {
-		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
-			clear_bit(starget->id, ioa_cfg->array_ids);
-		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
-			clear_bit(starget->id, ioa_cfg->vset_ids);
-		else if (starget->channel == 0)
-			clear_bit(starget->id, ioa_cfg->target_ids);
+		if (!ipr_find_starget(starget)) {
+			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
+				clear_bit(starget->id, ioa_cfg->array_ids);
+			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
+				clear_bit(starget->id, ioa_cfg->vset_ids);
+			else if (starget->channel == 0)
+				clear_bit(starget->id, ioa_cfg->target_ids);
+		}
 	}
 
 	if (sata_port) {
@@ -5048,12 +5051,14 @@
 		del_timer(&ioa_cfg->reset_cmd->timer);
 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
 	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
-		if (ipr_debug && printk_ratelimit())
-			dev_err(&ioa_cfg->pdev->dev,
-				"Spurious interrupt detected. 0x%08X\n", int_reg);
-		writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
-		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
-		return IRQ_NONE;
+		if (ioa_cfg->clear_isr) {
+			if (ipr_debug && printk_ratelimit())
+				dev_err(&ioa_cfg->pdev->dev,
+					"Spurious interrupt detected. 0x%08X\n", int_reg);
+			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+			return IRQ_NONE;
+		}
 	} else {
 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
 			ioa_cfg->ioa_unit_checked = 1;
@@ -5153,6 +5158,9 @@
 			}
 		}
 
+		if (ipr_cmd && !ioa_cfg->clear_isr)
+			break;
+
 		if (ipr_cmd != NULL) {
 			/* Clear the PCI interrupt */
 			num_hrrq = 0;
@@ -5854,14 +5862,12 @@
 			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 	}
 
-	if (likely(rc == 0)) {
-		mb();
-		ipr_send_command(ipr_cmd);
-	} else {
-		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-		 return SCSI_MLQUEUE_HOST_BUSY;
+	if (unlikely(rc != 0)) {
+		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	ipr_send_command(ipr_cmd);
 	return 0;
 }
 
@@ -6239,8 +6245,6 @@
 		return AC_ERR_INVALID;
 	}
 
-	mb();
-
 	ipr_send_command(ipr_cmd);
 
 	return 0;
@@ -8277,6 +8281,10 @@
 	if (ioa_cfg->ipr_cmd_pool)
 		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
 
+	kfree(ioa_cfg->ipr_cmnd_list);
+	kfree(ioa_cfg->ipr_cmnd_list_dma);
+	ioa_cfg->ipr_cmnd_list = NULL;
+	ioa_cfg->ipr_cmnd_list_dma = NULL;
 	ioa_cfg->ipr_cmd_pool = NULL;
 }
 
@@ -8352,11 +8360,19 @@
 	int i;
 
 	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
-						 sizeof(struct ipr_cmnd), 16, 0);
+						 sizeof(struct ipr_cmnd), 512, 0);
 
 	if (!ioa_cfg->ipr_cmd_pool)
 		return -ENOMEM;
 
+	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
+	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
+
+	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
+		ipr_free_cmd_blks(ioa_cfg);
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
 		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
@@ -8584,6 +8600,7 @@
 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = IPR_MAX_CDB_LEN;
+	host->can_queue = ioa_cfg->max_cmds;
 	pci_set_drvdata(pdev, ioa_cfg);
 
 	p = &ioa_cfg->chip_cfg->regs;
@@ -8768,6 +8785,8 @@
 	/* set SIS 32 or SIS 64 */
 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
+	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
 
 	if (ipr_transop_timeout)
 		ioa_cfg->transop_timeout = ipr_transop_timeout;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index f94eaee..153b8bd 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.5.2"
-#define IPR_DRIVER_DATE "(April 27, 2011)"
+#define IPR_DRIVER_VERSION "2.5.3"
+#define IPR_DRIVER_DATE "(March 10, 2012)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -53,7 +53,7 @@
  * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
  *	ops the mid-layer can send to the adapter.
  */
-#define IPR_NUM_BASE_CMD_BLKS				100
+#define IPR_NUM_BASE_CMD_BLKS			(ioa_cfg->max_cmds)
 
 #define PCI_DEVICE_ID_IBM_OBSIDIAN_E	0x0339
 
@@ -153,7 +153,7 @@
 #define IPR_NUM_INTERNAL_CMD_BLKS	(IPR_NUM_HCAMS + \
                                      ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
 
-#define IPR_MAX_COMMANDS		IPR_NUM_BASE_CMD_BLKS
+#define IPR_MAX_COMMANDS		100
 #define IPR_NUM_CMD_BLKS		(IPR_NUM_BASE_CMD_BLKS + \
 						IPR_NUM_INTERNAL_CMD_BLKS)
 
@@ -1305,7 +1305,9 @@
 
 struct ipr_chip_cfg_t {
 	u32 mailbox;
+	u16 max_cmds;
 	u8 cache_line_size;
+	u8 clear_isr;
 	struct ipr_interrupt_offsets regs;
 };
 
@@ -1388,6 +1390,7 @@
 	u8 sis64:1;
 	u8 dump_timeout:1;
 	u8 cfg_locked:1;
+	u8 clear_isr:1;
 
 	u8 revid;
 
@@ -1501,8 +1504,9 @@
 	struct ata_host ata_host;
 	char ipr_cmd_label[8];
 #define IPR_CMD_LABEL		"ipr_cmd"
-	struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
-	dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
+	u32 max_cmds;
+	struct ipr_cmnd **ipr_cmnd_list;
+	dma_addr_t *ipr_cmnd_list_dma;
 }; /* struct ipr_ioa_cfg */
 
 struct ipr_cmnd {
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 630291f..aceffad 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2263,7 +2263,18 @@
 	mp->class = class;
 	/* adjust em exch xid range for offload */
 	mp->min_xid = min_xid;
-	mp->max_xid = max_xid;
+
+	/* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
+	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
+		sizeof(struct fc_exch *);
+	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
+		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
+			min_xid - 1;
+	} else {
+		mp->max_xid = max_xid;
+		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
+			(fc_cpu_mask + 1);
+	}
 
 	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
 	if (!mp->ep_pool)
@@ -2274,7 +2285,6 @@
 	 * divided across all cpus. The exch pointers array memory is
 	 * allocated for exch range per pool.
 	 */
-	pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
 	mp->pool_max_index = pool_exch_range - 1;
 
 	/*
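Editorial note on the fc_exch.c hunk above: the exchange-ID range is now clamped so that each per-CPU pool header plus its array of struct fc_exch pointers fits inside one PCPU_MIN_UNIT_SIZE percpu allocation. The standalone sketch below walks through that sizing arithmetic; the numeric values (32 KiB chunk, 64-byte pool header, 4 pools, example xid range) are assumptions chosen for illustration, not the kernel's actual constants, and the sketch is not part of the patch.

#include <stdio.h>

int main(void)
{
	/* Assumed stand-ins for PCPU_MIN_UNIT_SIZE, sizeof(*pool), fc_cpu_mask + 1 */
	unsigned int pcpu_min_unit_size = 32 * 1024;
	unsigned int pool_header = 64;
	unsigned int ptr_size = sizeof(void *);
	unsigned int nr_pools = 4;
	unsigned int min_xid = 0x0000, max_xid = 0xfffe;	/* example request */

	/* Largest number of exchange pointers one percpu pool can hold. */
	unsigned int pool_exch_range = (pcpu_min_unit_size - pool_header) / ptr_size;

	unsigned int wanted_per_pool = (max_xid - min_xid + 1) / nr_pools;

	if (wanted_per_pool > pool_exch_range)
		/* Shrink the advertised xid range to what the pools can map. */
		max_xid = pool_exch_range * nr_pools + min_xid - 1;
	else
		pool_exch_range = wanted_per_pool;

	printf("per-pool slots %u, xid range %#x..%#x\n",
	       pool_exch_range, min_xid, max_xid);
	return 0;
}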
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index bd5d31d..ef9560d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1743,8 +1743,16 @@
 	mfs = ntohs(flp->fl_csp.sp_bb_data) &
 		FC_SP_BB_DATA_MASK;
 	if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
-	    mfs < lport->mfs)
+	    mfs <= lport->mfs) {
 		lport->mfs = mfs;
+		fc_host_maxframe_size(lport->host) = mfs;
+	} else {
+		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+			     "lport->mfs:%hu\n", mfs, lport->mfs);
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
 	csp_flags = ntohs(flp->fl_csp.sp_features);
 	r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
 	e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 88928f0..fe5d396 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
 #/*******************************************************************
 # * This file is part of the Emulex Linux Device Driver for         *
 # * Fibre Channel Host Bus Adapters.                                *
-# * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+# * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 # * EMULEX and SLI are trademarks of Emulex.                        *
 # * www.emulex.com                                                  *
 # *                                                                 *
@@ -22,6 +22,8 @@
 ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
 ccflags-$(GCOV) += -O0
 
+ccflags-y += -Werror
+
 obj-$(CONFIG_SCSI_LPFC) := lpfc.o
 
 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o	\
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 5fc044f..3a1ffdd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -840,6 +840,8 @@
 	struct dentry *debug_dumpData;   /* BlockGuard BPL */
 	struct dentry *debug_dumpDif;    /* BlockGuard BPL */
 	struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
+	struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
+	struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
 	struct dentry *debug_writeGuard; /* inject write guard_tag errors */
 	struct dentry *debug_writeApp;   /* inject write app_tag errors */
 	struct dentry *debug_writeRef;   /* inject write ref_tag errors */
@@ -854,6 +856,8 @@
 	uint32_t lpfc_injerr_rgrd_cnt;
 	uint32_t lpfc_injerr_rapp_cnt;
 	uint32_t lpfc_injerr_rref_cnt;
+	uint32_t lpfc_injerr_nportid;
+	struct lpfc_name lpfc_injerr_wwpn;
 	sector_t lpfc_injerr_lba;
 #define LPFC_INJERR_LBA_OFF	(sector_t)(-1)
 
@@ -908,6 +912,8 @@
 	atomic_t fast_event_count;
 	uint32_t fcoe_eventtag;
 	uint32_t fcoe_eventtag_at_fcf_scan;
+	uint32_t fcoe_cvl_eventtag;
+	uint32_t fcoe_cvl_eventtag_attn;
 	struct lpfc_fcf fcf;
 	uint8_t fc_map[3];
 	uint8_t valid_vlan;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 296ad5b..5eb2bc1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2575,7 +2575,7 @@
 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
 # objects that have been registered with the nameserver after login.
 */
-LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
+LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
 		  "Deregister nameserver objects before LOGO");
 
 /*
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 22e17be..af04b0d 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2007-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -997,38 +997,41 @@
 	return nbytes;
 }
 
-static int
-lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t
 lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
 	size_t nbytes, loff_t *ppos)
 {
 	struct dentry *dent = file->f_dentry;
 	struct lpfc_hba *phba = file->private_data;
-	char cbuf[16];
+	char cbuf[32];
+	uint64_t tmp = 0;
 	int cnt = 0;
 
 	if (dent == phba->debug_writeGuard)
-		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
 	else if (dent == phba->debug_writeApp)
-		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
 	else if (dent == phba->debug_writeRef)
-		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
 	else if (dent == phba->debug_readGuard)
-		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
 	else if (dent == phba->debug_readApp)
-		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
 	else if (dent == phba->debug_readRef)
-		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt);
-	else if (dent == phba->debug_InjErrLBA)
-		cnt = snprintf(cbuf, 16, "0x%lx\n",
-				 (unsigned long) phba->lpfc_injerr_lba);
-	else
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+	else if (dent == phba->debug_InjErrNPortID)
+		cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+	else if (dent == phba->debug_InjErrWWPN) {
+		memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
+		tmp = cpu_to_be64(tmp);
+		cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+	} else if (dent == phba->debug_InjErrLBA) {
+		if (phba->lpfc_injerr_lba == (sector_t)(-1))
+			cnt = snprintf(cbuf, 32, "off\n");
+		else
+			cnt = snprintf(cbuf, 32, "0x%llx\n",
+				 (uint64_t) phba->lpfc_injerr_lba);
+	} else
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			 "0547 Unknown debugfs error injection entry\n");
 
@@ -1042,7 +1045,7 @@
 	struct dentry *dent = file->f_dentry;
 	struct lpfc_hba *phba = file->private_data;
 	char dstbuf[32];
-	unsigned long tmp;
+	uint64_t tmp = 0;
 	int size;
 
 	memset(dstbuf, 0, 32);
@@ -1050,7 +1053,12 @@
 	if (copy_from_user(dstbuf, buf, size))
 		return 0;
 
-	if (strict_strtoul(dstbuf, 0, &tmp))
+	if (dent == phba->debug_InjErrLBA) {
+		if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && (dstbuf[2] == 'f'))
+			tmp = (uint64_t)(-1);
+	}
+
+	if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
 		return 0;
 
 	if (dent == phba->debug_writeGuard)
@@ -1067,7 +1075,12 @@
 		phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
 	else if (dent == phba->debug_InjErrLBA)
 		phba->lpfc_injerr_lba = (sector_t)tmp;
-	else
+	else if (dent == phba->debug_InjErrNPortID)
+		phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
+	else if (dent == phba->debug_InjErrWWPN) {
+		tmp = cpu_to_be64(tmp);
+		memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
+	} else
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			 "0548 Unknown debugfs error injection entry\n");
 
@@ -3521,7 +3534,7 @@
 #undef lpfc_debugfs_op_dif_err
 static const struct file_operations lpfc_debugfs_op_dif_err = {
 	.owner =	THIS_MODULE,
-	.open =		lpfc_debugfs_dif_err_open,
+	.open =		simple_open,
 	.llseek =	lpfc_debugfs_lseek,
 	.read =		lpfc_debugfs_dif_err_read,
 	.write =	lpfc_debugfs_dif_err_write,
@@ -3949,6 +3962,28 @@
 		}
 		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
 
+		snprintf(name, sizeof(name), "InjErrNPortID");
+		phba->debug_InjErrNPortID =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrNPortID) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0809 Cannot create debugfs InjErrNPortID\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "InjErrWWPN");
+		phba->debug_InjErrWWPN =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrWWPN) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0810 Cannot create debugfs InjErrWWPN\n");
+			goto debug_failed;
+		}
+
 		snprintf(name, sizeof(name), "writeGuardInjErr");
 		phba->debug_writeGuard =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -4321,6 +4356,14 @@
 			debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
 			phba->debug_InjErrLBA = NULL;
 		}
+		if (phba->debug_InjErrNPortID) {	 /* InjErrNPortID */
+			debugfs_remove(phba->debug_InjErrNPortID);
+			phba->debug_InjErrNPortID = NULL;
+		}
+		if (phba->debug_InjErrWWPN) {
+			debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+			phba->debug_InjErrWWPN = NULL;
+		}
 		if (phba->debug_writeGuard) {
 			debugfs_remove(phba->debug_writeGuard); /* writeGuard */
 			phba->debug_writeGuard = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8db2fb3..3407b39 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -925,9 +925,17 @@
 		 * due to new FCF discovery
 		 */
 		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
-		    (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
-		    !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-		     (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
+		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+			if (phba->link_state < LPFC_LINK_UP)
+				goto stop_rr_fcf_flogi;
+			if ((phba->fcoe_cvl_eventtag_attn ==
+			     phba->fcoe_cvl_eventtag) &&
+			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+			    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+				goto stop_rr_fcf_flogi;
+			else
+				phba->fcoe_cvl_eventtag_attn =
+					phba->fcoe_cvl_eventtag;
 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
 					"2611 FLOGI failed on FCF (x%x), "
 					"status:x%x/x%x, tmo:x%x, perform "
@@ -943,6 +951,7 @@
 				goto out;
 		}
 
+stop_rr_fcf_flogi:
 		/* FLOGI failure */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 343d87b..b507536 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2843,7 +2843,14 @@
 	struct lpfc_vport *vport = mboxq->vport;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	if (mboxq->u.mb.mbxStatus) {
+	/*
+	 * VFI not supported for interface type 0, so ignore any mailbox
+	 * error (except VFI in use) and continue with the discovery.
+	 */
+	if (mboxq->u.mb.mbxStatus &&
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+			LPFC_SLI_INTF_IF_TYPE_0) &&
+	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
 			 "2018 REG_VFI mbxStatus error x%x "
 			 "HBA state x%x\n",
@@ -5673,14 +5680,13 @@
 				ret = 1;
 				spin_unlock_irq(shost->host_lock);
 				goto out;
-			} else {
+			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+				ret = 1;
 				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-					"2624 RPI %x DID %x flg %x still "
-					"logged in\n",
-					ndlp->nlp_rpi, ndlp->nlp_DID,
-					ndlp->nlp_flag);
-				if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
-					ret = 1;
+						"2624 RPI %x DID %x flag %x "
+						"still logged in\n",
+						ndlp->nlp_rpi, ndlp->nlp_DID,
+						ndlp->nlp_flag);
 			}
 		}
 		spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 9e2b9b22..91f0976 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -338,6 +338,12 @@
 #define CQE_CODE_XRI_ABORTED		0x5
 #define CQE_CODE_RECEIVE_V1		0x9
 
+/*
+ * Define mask value for xri_aborted and wcqe completed CQE extended status.
+ * Currently, extended status is limited to 9 bits (0x0 -> 0x103).
+ */
+#define WCQE_PARAM_MASK		0x1FF
+
 /* completion queue entry for wqe completions */
 struct lpfc_wcqe_complete {
 	uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b38f99f..9598fdc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2704,16 +2704,14 @@
 				}
 				spin_lock_irq(shost->host_lock);
 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-
+				spin_unlock_irq(shost->host_lock);
 				/*
 				 * Whenever an SLI4 port goes offline, free the
-				 * RPI.  A new RPI when the adapter port comes
-				 * back online.
+				 * RPI. Get a new RPI when the adapter port
+				 * comes back online.
 				 */
 				if (phba->sli_rev == LPFC_SLI_REV4)
 					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
-
-				spin_unlock_irq(shost->host_lock);
 				lpfc_unreg_rpi(vports[i], ndlp);
 			}
 		}
@@ -2786,9 +2784,13 @@
 
 	spin_lock_irq(&phba->hbalock);
 	spin_lock(&phba->scsi_buf_list_lock);
-	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
 		sb->cur_iocbq.sli4_xritag =
 			phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+		set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
+		phba->sli4_hba.max_cfg_param.xri_used++;
+		phba->sli4_hba.xri_count++;
+	}
 	spin_unlock(&phba->scsi_buf_list_lock);
 	spin_unlock_irq(&phba->hbalock);
 	return 0;
@@ -3723,6 +3725,7 @@
 		break;
 
 	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2549 FCF (x%x) disconnected from network, "
 			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -3784,6 +3787,7 @@
 		}
 		break;
 	case LPFC_FIP_EVENT_TYPE_CVL:
+		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
 			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -5226,8 +5230,7 @@
 	 * rpi is normalized to a zero base because the physical rpi is
 	 * port based.
 	 */
-	curr_rpi_range = phba->sli4_hba.next_rpi -
-		phba->sli4_hba.max_cfg_param.rpi_base;
+	curr_rpi_range = phba->sli4_hba.next_rpi;
 	spin_unlock_irq(&phba->hbalock);
 
 	/*
@@ -5818,10 +5821,9 @@
 					readl(phba->sli4_hba.u.if_type2.
 					      ERR2regaddr);
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2888 Port Error Detected "
-					"during POST: "
-					"port status reg 0x%x, "
-					"port_smphr reg 0x%x, "
+					"2888 Unrecoverable port error "
+					"following POST: port status reg "
+					"0x%x, port_smphr reg 0x%x, "
 					"error 1=0x%x, error 2=0x%x\n",
 					reg_data.word0,
 					portsmphr_reg.word0,
@@ -6142,7 +6144,6 @@
 		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
 		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
 		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
-		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
 		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
 				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
 		phba->max_vports = phba->max_vpi;
@@ -7231,6 +7232,7 @@
 	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct lpfc_register reg_data;
+	uint16_t devid;
 
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 	switch (if_type) {
@@ -7277,7 +7279,9 @@
 			       LPFC_SLIPORT_INIT_PORT);
 			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
 			       CTRLregaddr);
-
+			/* flush */
+			pci_read_config_word(phba->pcidev,
+					     PCI_DEVICE_ID, &devid);
 			/*
 			 * Poll the Port Status Register and wait for RDY for
 			 * up to 10 seconds.  If the port doesn't respond, treat
@@ -7315,11 +7319,10 @@
 				phba->work_status[1] = readl(
 					phba->sli4_hba.u.if_type2.ERR2regaddr);
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2890 Port Error Detected "
-					"during Port Reset: "
-					"port status reg 0x%x, "
+					"2890 Port error detected during port "
+					"reset(%d): port status reg 0x%x, "
 					"error 1=0x%x, error 2=0x%x\n",
-					reg_data.word0,
+					num_resets, reg_data.word0,
 					phba->work_status[0],
 					phba->work_status[1]);
 				rc = -ENODEV;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 7b6b2aa..15ca2a9a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -440,11 +440,15 @@
 		spin_unlock_irq(shost->host_lock);
 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
-		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
 			ndlp, mbox);
+		if (rc)
+			mempool_free(mbox, phba->mbox_mem_pool);
 		return 1;
 	}
-	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+	if (rc)
+		mempool_free(mbox, phba->mbox_mem_pool);
 	return 1;
 out:
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index efc055b..88f3a83 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -39,8 +39,8 @@
 #include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
-#include "lpfc_scsi.h"
 #include "lpfc.h"
+#include "lpfc_scsi.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
 #include "lpfc_vport.h"
@@ -51,13 +51,19 @@
 int _dump_buf_done;
 
 static char *dif_op_str[] = {
-	"SCSI_PROT_NORMAL",
-	"SCSI_PROT_READ_INSERT",
-	"SCSI_PROT_WRITE_STRIP",
-	"SCSI_PROT_READ_STRIP",
-	"SCSI_PROT_WRITE_INSERT",
-	"SCSI_PROT_READ_PASS",
-	"SCSI_PROT_WRITE_PASS",
+	"PROT_NORMAL",
+	"PROT_READ_INSERT",
+	"PROT_WRITE_STRIP",
+	"PROT_READ_STRIP",
+	"PROT_WRITE_INSERT",
+	"PROT_READ_PASS",
+	"PROT_WRITE_PASS",
+};
+
+static char *dif_grd_str[] = {
+	"NO_GUARD",
+	"DIF_CRC",
+	"DIX_IP",
 };
 
 struct scsi_dif_tuple {
@@ -1281,10 +1287,14 @@
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 
-#define BG_ERR_INIT	1
-#define BG_ERR_TGT	2
-#define BG_ERR_SWAP	3
-#define BG_ERR_CHECK	4
+/* Return if error injection is detected by Initiator */
+#define BG_ERR_INIT	0x1
+/* Return if error injection is detected by Target */
+#define BG_ERR_TGT	0x2
+/* Return if swapping CSUM<-->CRC is required for error injection */
+#define BG_ERR_SWAP	0x10
+/* Return if disabling Guard/Ref/App checking is required for error injection */
+#define BG_ERR_CHECK	0x20
 
 /**
  * lpfc_bg_err_inject - Determine if we should inject an error
@@ -1294,10 +1304,7 @@
  * @apptag: (out) BlockGuard application tag for transmitted data
  * @new_guard (in) Value to replace CRC with if needed
  *
- * Returns (1) if error injection is detected by Initiator
- * Returns (2) if error injection is detected by Target
- * Returns (3) if swapping CSUM->CRC is required for error injection
- * Returns (4) disabling Guard/Ref/App checking is required for error injection
+ * Returns BG_ERR_* bit mask or 0 if request ignored
  **/
 static int
 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
@@ -1305,7 +1312,10 @@
 {
 	struct scatterlist *sgpe; /* s/g prot entry */
 	struct scatterlist *sgde; /* s/g data entry */
+	struct lpfc_scsi_buf *lpfc_cmd = NULL;
 	struct scsi_dif_tuple *src = NULL;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_rport_data *rdata;
 	uint32_t op = scsi_get_prot_op(sc);
 	uint32_t blksize;
 	uint32_t numblks;
@@ -1318,8 +1328,9 @@
 
 	sgpe = scsi_prot_sglist(sc);
 	sgde = scsi_sglist(sc);
-
 	lba = scsi_get_lba(sc);
+
+	/* First check if we need to match the LBA */
 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
 		blksize = lpfc_cmd_blksize(sc);
 		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
@@ -1334,37 +1345,104 @@
 				sizeof(struct scsi_dif_tuple);
 			if (numblks < blockoff)
 				blockoff = numblks;
-			src = (struct scsi_dif_tuple *)sg_virt(sgpe);
-			src += blockoff;
 		}
 	}
 
+	/* Next check if we need to match the remote NPortID or WWPN */
+	rdata = sc->device->hostdata;
+	if (rdata && rdata->pnode) {
+		ndlp = rdata->pnode;
+
+		/* Make sure we have the right NPortID if one is specified */
+		if (phba->lpfc_injerr_nportid  &&
+			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
+			return 0;
+
+		/*
+		 * Make sure we have the right WWPN if one is specified.
+		 * wwn[0] should be a non-zero NAA in a good WWPN.
+		 */
+		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
+			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
+				sizeof(struct lpfc_name)) != 0))
+			return 0;
+	}
+
+	/* Setup a ptr to the protection data if the SCSI host provides it */
+	if (sgpe) {
+		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+		src += blockoff;
+		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+	}
+
 	/* Should we change the Reference Tag */
 	if (reftag) {
 		if (phba->lpfc_injerr_wref_cnt) {
 			switch (op) {
 			case SCSI_PROT_WRITE_PASS:
-				if (blockoff && src) {
-					/* Insert error in middle of the IO */
+				if (src) {
+					/*
+					 * For WRITE_PASS, force the error
+					 * to be sent on the wire. It should
+					 * be detected by the Target.
+					 * If blockoff != 0 error will be
+					 * inserted in middle of the IO.
+					 */
 
 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 					"9076 BLKGRD: Injecting reftag error: "
 					"write lba x%lx + x%x oldrefTag x%x\n",
 					(unsigned long)lba, blockoff,
-					src->ref_tag);
+					be32_to_cpu(src->ref_tag));
 
 					/*
-					 * NOTE, this will change ref tag in
-					 * the memory location forever!
+					 * Save the old ref_tag so we can
+					 * restore it on completion.
 					 */
-					src->ref_tag = 0xDEADBEEF;
+					if (lpfc_cmd) {
+						lpfc_cmd->prot_data_type =
+							LPFC_INJERR_REFTAG;
+						lpfc_cmd->prot_data_segment =
+							src;
+						lpfc_cmd->prot_data =
+							src->ref_tag;
+					}
+					src->ref_tag = cpu_to_be32(0xDEADBEEF);
 					phba->lpfc_injerr_wref_cnt--;
-					phba->lpfc_injerr_lba =
-						LPFC_INJERR_LBA_OFF;
-					rc = BG_ERR_CHECK;
+					if (phba->lpfc_injerr_wref_cnt == 0) {
+						phba->lpfc_injerr_nportid = 0;
+						phba->lpfc_injerr_lba =
+							LPFC_INJERR_LBA_OFF;
+						memset(&phba->lpfc_injerr_wwpn,
+						  0, sizeof(struct lpfc_name));
+					}
+					rc = BG_ERR_TGT | BG_ERR_CHECK;
+
 					break;
 				}
 				/* Drop thru */
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the error
+				 * to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				/* DEADBEEF will be the reftag on the wire */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_wref_cnt--;
+				if (phba->lpfc_injerr_wref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+					LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9078 BLKGRD: Injecting reftag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
 			case SCSI_PROT_WRITE_STRIP:
 				/*
 				 * For WRITE_STRIP and WRITE_PASS,
@@ -1373,39 +1451,24 @@
 				 */
 				*reftag = 0xDEADBEEF;
 				phba->lpfc_injerr_wref_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+				if (phba->lpfc_injerr_wref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
 				rc = BG_ERR_INIT;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 					"9077 BLKGRD: Injecting reftag error: "
 					"write lba x%lx\n", (unsigned long)lba);
 				break;
-			case SCSI_PROT_WRITE_INSERT:
-				/*
-				 * For WRITE_INSERT, force the
-				 * error to be sent on the wire. It should be
-				 * detected by the Target.
-				 */
-				/* DEADBEEF will be the reftag on the wire */
-				*reftag = 0xDEADBEEF;
-				phba->lpfc_injerr_wref_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-				rc = BG_ERR_TGT;
-
-				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9078 BLKGRD: Injecting reftag error: "
-					"write lba x%lx\n", (unsigned long)lba);
-				break;
 			}
 		}
 		if (phba->lpfc_injerr_rref_cnt) {
 			switch (op) {
 			case SCSI_PROT_READ_INSERT:
-				/*
-				 * For READ_INSERT, it doesn't make sense
-				 * to change the reftag.
-				 */
-				break;
 			case SCSI_PROT_READ_STRIP:
 			case SCSI_PROT_READ_PASS:
 				/*
@@ -1415,7 +1478,13 @@
 				 */
 				*reftag = 0xDEADBEEF;
 				phba->lpfc_injerr_rref_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+				if (phba->lpfc_injerr_rref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
 				rc = BG_ERR_INIT;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1431,42 +1500,46 @@
 		if (phba->lpfc_injerr_wapp_cnt) {
 			switch (op) {
 			case SCSI_PROT_WRITE_PASS:
-				if (blockoff && src) {
-					/* Insert error in middle of the IO */
+				if (src) {
+					/*
+					 * For WRITE_PASS, force the error
+					 * to be sent on the wire. It should
+					 * be detected by the Target.
+					 * If blockoff != 0 error will be
+					 * inserted in middle of the IO.
+					 */
 
 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 					"9080 BLKGRD: Injecting apptag error: "
 					"write lba x%lx + x%x oldappTag x%x\n",
 					(unsigned long)lba, blockoff,
-					src->app_tag);
+					be16_to_cpu(src->app_tag));
 
 					/*
-					 * NOTE, this will change app tag in
-					 * the memory location forever!
+					 * Save the old app_tag so we can
+					 * restore it on completion.
 					 */
-					src->app_tag = 0xDEAD;
+					if (lpfc_cmd) {
+						lpfc_cmd->prot_data_type =
+							LPFC_INJERR_APPTAG;
+						lpfc_cmd->prot_data_segment =
+							src;
+						lpfc_cmd->prot_data =
+							src->app_tag;
+					}
+					src->app_tag = cpu_to_be16(0xDEAD);
 					phba->lpfc_injerr_wapp_cnt--;
-					phba->lpfc_injerr_lba =
-						LPFC_INJERR_LBA_OFF;
-					rc = BG_ERR_CHECK;
+					if (phba->lpfc_injerr_wapp_cnt == 0) {
+						phba->lpfc_injerr_nportid = 0;
+						phba->lpfc_injerr_lba =
+							LPFC_INJERR_LBA_OFF;
+						memset(&phba->lpfc_injerr_wwpn,
+						  0, sizeof(struct lpfc_name));
+					}
+					rc = BG_ERR_TGT | BG_ERR_CHECK;
 					break;
 				}
 				/* Drop thru */
-			case SCSI_PROT_WRITE_STRIP:
-				/*
-				 * For WRITE_STRIP and WRITE_PASS,
-				 * force the error on data
-				 * being copied from SLI-Host to SLI-Port.
-				 */
-				*apptag = 0xDEAD;
-				phba->lpfc_injerr_wapp_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-				rc = BG_ERR_INIT;
-
-				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"0812 BLKGRD: Injecting apptag error: "
-					"write lba x%lx\n", (unsigned long)lba);
-				break;
 			case SCSI_PROT_WRITE_INSERT:
 				/*
 				 * For WRITE_INSERT, force the
@@ -1476,23 +1549,45 @@
 				/* DEAD will be the apptag on the wire */
 				*apptag = 0xDEAD;
 				phba->lpfc_injerr_wapp_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-				rc = BG_ERR_TGT;
+				if (phba->lpfc_injerr_wapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_TGT | BG_ERR_CHECK;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 					"0813 BLKGRD: Injecting apptag error: "
 					"write lba x%lx\n", (unsigned long)lba);
 				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_wapp_cnt--;
+				if (phba->lpfc_injerr_wapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0812 BLKGRD: Injecting apptag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
 			}
 		}
 		if (phba->lpfc_injerr_rapp_cnt) {
 			switch (op) {
 			case SCSI_PROT_READ_INSERT:
-				/*
-				 * For READ_INSERT, it doesn't make sense
-				 * to change the apptag.
-				 */
-				break;
 			case SCSI_PROT_READ_STRIP:
 			case SCSI_PROT_READ_PASS:
 				/*
@@ -1502,7 +1597,13 @@
 				 */
 				*apptag = 0xDEAD;
 				phba->lpfc_injerr_rapp_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+				if (phba->lpfc_injerr_rapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
 				rc = BG_ERR_INIT;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1519,43 +1620,9 @@
 		if (phba->lpfc_injerr_wgrd_cnt) {
 			switch (op) {
 			case SCSI_PROT_WRITE_PASS:
-				if (blockoff && src) {
-					/* Insert error in middle of the IO */
-
-					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"0815 BLKGRD: Injecting guard error: "
-					"write lba x%lx + x%x oldgrdTag x%x\n",
-					(unsigned long)lba, blockoff,
-					src->guard_tag);
-
-					/*
-					 * NOTE, this will change guard tag in
-					 * the memory location forever!
-					 */
-					src->guard_tag = 0xDEAD;
-					phba->lpfc_injerr_wgrd_cnt--;
-					phba->lpfc_injerr_lba =
-						LPFC_INJERR_LBA_OFF;
-					rc = BG_ERR_CHECK;
-					break;
-				}
+				rc = BG_ERR_CHECK;
 				/* Drop thru */
-			case SCSI_PROT_WRITE_STRIP:
-				/*
-				 * For WRITE_STRIP and WRITE_PASS,
-				 * force the error on data
-				 * being copied from SLI-Host to SLI-Port.
-				 */
-				phba->lpfc_injerr_wgrd_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
 
-				rc = BG_ERR_SWAP;
-				/* Signals the caller to swap CRC->CSUM */
-
-				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"0816 BLKGRD: Injecting guard error: "
-					"write lba x%lx\n", (unsigned long)lba);
-				break;
 			case SCSI_PROT_WRITE_INSERT:
 				/*
 				 * For WRITE_INSERT, force the
@@ -1563,25 +1630,48 @@
 				 * detected by the Target.
 				 */
 				phba->lpfc_injerr_wgrd_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+				if (phba->lpfc_injerr_wgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
 
-				rc = BG_ERR_SWAP;
+				rc |= BG_ERR_TGT | BG_ERR_SWAP;
 				/* Signals the caller to swap CRC->CSUM */
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 					"0817 BLKGRD: Injecting guard error: "
 					"write lba x%lx\n", (unsigned long)lba);
 				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				phba->lpfc_injerr_wgrd_cnt--;
+				if (phba->lpfc_injerr_wgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+
+				rc = BG_ERR_INIT | BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0816 BLKGRD: Injecting guard error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
 			}
 		}
 		if (phba->lpfc_injerr_rgrd_cnt) {
 			switch (op) {
 			case SCSI_PROT_READ_INSERT:
-				/*
-				 * For READ_INSERT, it doesn't make sense
-				 * to change the guard tag.
-				 */
-				break;
 			case SCSI_PROT_READ_STRIP:
 			case SCSI_PROT_READ_PASS:
 				/*
@@ -1589,11 +1679,16 @@
 				 * error on data being read off the wire. It
 				 * should force an IO error to the driver.
 				 */
-				*apptag = 0xDEAD;
 				phba->lpfc_injerr_rgrd_cnt--;
-				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+				if (phba->lpfc_injerr_rgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
 
-				rc = BG_ERR_SWAP;
+				rc = BG_ERR_INIT | BG_ERR_SWAP;
 				/* Signals the caller to swap CRC->CSUM */
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1629,20 +1724,20 @@
 		switch (scsi_get_prot_op(sc)) {
 		case SCSI_PROT_READ_INSERT:
 		case SCSI_PROT_WRITE_STRIP:
-			*txop = BG_OP_IN_CSUM_OUT_NODIF;
 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
 			break;
 
 		case SCSI_PROT_READ_STRIP:
 		case SCSI_PROT_WRITE_INSERT:
-			*txop = BG_OP_IN_NODIF_OUT_CRC;
 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CRC;
 			break;
 
 		case SCSI_PROT_READ_PASS:
 		case SCSI_PROT_WRITE_PASS:
-			*txop = BG_OP_IN_CSUM_OUT_CRC;
 			*rxop = BG_OP_IN_CRC_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_CRC;
 			break;
 
 		case SCSI_PROT_NORMAL:
@@ -1658,20 +1753,20 @@
 		switch (scsi_get_prot_op(sc)) {
 		case SCSI_PROT_READ_STRIP:
 		case SCSI_PROT_WRITE_INSERT:
-			*txop = BG_OP_IN_NODIF_OUT_CRC;
 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CRC;
 			break;
 
 		case SCSI_PROT_READ_PASS:
 		case SCSI_PROT_WRITE_PASS:
-			*txop = BG_OP_IN_CRC_OUT_CRC;
 			*rxop = BG_OP_IN_CRC_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_CRC;
 			break;
 
 		case SCSI_PROT_READ_INSERT:
 		case SCSI_PROT_WRITE_STRIP:
-			*txop = BG_OP_IN_CRC_OUT_NODIF;
 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
 			break;
 
 		case SCSI_PROT_NORMAL:
@@ -1710,20 +1805,20 @@
 		switch (scsi_get_prot_op(sc)) {
 		case SCSI_PROT_READ_INSERT:
 		case SCSI_PROT_WRITE_STRIP:
-			*txop = BG_OP_IN_CRC_OUT_NODIF;
 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
 			break;
 
 		case SCSI_PROT_READ_STRIP:
 		case SCSI_PROT_WRITE_INSERT:
-			*txop = BG_OP_IN_NODIF_OUT_CSUM;
 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
 			break;
 
 		case SCSI_PROT_READ_PASS:
 		case SCSI_PROT_WRITE_PASS:
-			*txop = BG_OP_IN_CRC_OUT_CRC;
-			*rxop = BG_OP_IN_CRC_OUT_CRC;
+			*rxop = BG_OP_IN_CSUM_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_CSUM;
 			break;
 
 		case SCSI_PROT_NORMAL:
@@ -1735,20 +1830,20 @@
 		switch (scsi_get_prot_op(sc)) {
 		case SCSI_PROT_READ_STRIP:
 		case SCSI_PROT_WRITE_INSERT:
-			*txop = BG_OP_IN_NODIF_OUT_CSUM;
 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
 			break;
 
 		case SCSI_PROT_READ_PASS:
 		case SCSI_PROT_WRITE_PASS:
-			*txop = BG_OP_IN_CSUM_OUT_CRC;
-			*rxop = BG_OP_IN_CRC_OUT_CSUM;
+			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_CSUM;
 			break;
 
 		case SCSI_PROT_READ_INSERT:
 		case SCSI_PROT_WRITE_STRIP:
-			*txop = BG_OP_IN_CSUM_OUT_NODIF;
 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
 			break;
 
 		case SCSI_PROT_NORMAL:
@@ -1817,11 +1912,11 @@
 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 	if (rc) {
-		if (rc == BG_ERR_SWAP)
+		if (rc & BG_ERR_SWAP)
 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-		if (rc == BG_ERR_CHECK)
+		if (rc & BG_ERR_CHECK)
 			checking = 0;
 	}
 #endif
@@ -1964,11 +2059,11 @@
 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 	if (rc) {
-		if (rc == BG_ERR_SWAP)
+		if (rc & BG_ERR_SWAP)
 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-		if (rc == BG_ERR_CHECK)
+		if (rc & BG_ERR_CHECK)
 			checking = 0;
 	}
 #endif
@@ -2172,11 +2267,11 @@
 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 	if (rc) {
-		if (rc == BG_ERR_SWAP)
+		if (rc & BG_ERR_SWAP)
 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-		if (rc == BG_ERR_CHECK)
+		if (rc & BG_ERR_CHECK)
 			checking = 0;
 	}
 #endif
@@ -2312,11 +2407,11 @@
 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 	if (rc) {
-		if (rc == BG_ERR_SWAP)
+		if (rc & BG_ERR_SWAP)
 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-		if (rc == BG_ERR_CHECK)
+		if (rc & BG_ERR_CHECK)
 			checking = 0;
 	}
 #endif
@@ -2788,7 +2883,7 @@
 		/* No error was reported - problem in FW? */
 		cmd->result = ScsiResult(DID_ERROR, 0);
 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9057 BLKGRD: no errors reported!\n");
+			"9057 BLKGRD: Unknown error reported!\n");
 	}
 
 out:
@@ -3460,6 +3555,37 @@
 	/* pick up SLI4 exchange busy status from HBA */
 	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
 
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (lpfc_cmd->prot_data_type) {
+		struct scsi_dif_tuple *src = NULL;
+
+		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+		/*
+		 * Used to restore any changes to protection
+		 * data for error injection.
+		 */
+		switch (lpfc_cmd->prot_data_type) {
+		case LPFC_INJERR_REFTAG:
+			src->ref_tag =
+				lpfc_cmd->prot_data;
+			break;
+		case LPFC_INJERR_APPTAG:
+			src->app_tag =
+				(uint16_t)lpfc_cmd->prot_data;
+			break;
+		case LPFC_INJERR_GUARD:
+			src->guard_tag =
+				(uint16_t)lpfc_cmd->prot_data;
+			break;
+		default:
+			break;
+		}
+
+		lpfc_cmd->prot_data = 0;
+		lpfc_cmd->prot_data_type = 0;
+		lpfc_cmd->prot_data_segment = NULL;
+	}
+#endif
 	if (pnode && NLP_CHK_NODE_ACT(pnode))
 		atomic_dec(&pnode->cmd_pending);
 
@@ -4061,15 +4187,6 @@
 		cmnd->result = err;
 		goto out_fail_command;
 	}
-	/*
-	 * Do not let the mid-layer retry I/O too fast. If an I/O is retried
-	 * without waiting a bit then indicate that the device is busy.
-	 */
-	if (cmnd->retries &&
-	    time_before(jiffies, (cmnd->jiffies_at_alloc +
-				  msecs_to_jiffies(LPFC_RETRY_PAUSE *
-						   cmnd->retries))))
-		return SCSI_MLQUEUE_DEVICE_BUSY;
 	ndlp = rdata->pnode;
 
 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
@@ -4119,63 +4236,48 @@
 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
 		if (vport->phba->cfg_enable_bg) {
 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-				"9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
-				"str=%s\n",
-				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
-				dif_op_str[scsi_get_prot_op(cmnd)]);
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
-				"%02x %02x %02x %02x %02x\n",
-				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
-				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
-				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
-				cmnd->cmnd[9]);
+				"9033 BLKGRD: rcvd protected cmd:%02x op=%s "
+				"guard=%s\n", cmnd->cmnd[0],
+				dif_op_str[scsi_get_prot_op(cmnd)],
+				dif_grd_str[scsi_host_get_guard(shost)]);
 			if (cmnd->cmnd[0] == READ_10)
 				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 					"9035 BLKGRD: READ @ sector %llu, "
-					"count %u\n",
+					"cnt %u, rpt %d\n",
 					(unsigned long long)scsi_get_lba(cmnd),
-					blk_rq_sectors(cmnd->request));
+					blk_rq_sectors(cmnd->request),
+					(cmnd->cmnd[1]>>5));
 			else if (cmnd->cmnd[0] == WRITE_10)
 				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 					"9036 BLKGRD: WRITE @ sector %llu, "
-					"count %u cmd=%p\n",
+					"cnt %u, wpt %d\n",
 					(unsigned long long)scsi_get_lba(cmnd),
 					blk_rq_sectors(cmnd->request),
-					cmnd);
+					(cmnd->cmnd[1]>>5));
 		}
 
 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
 	} else {
 		if (vport->phba->cfg_enable_bg) {
 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					"9038 BLKGRD: rcvd unprotected cmd:"
-					"%02x op:%02x str=%s\n",
-					cmnd->cmnd[0], scsi_get_prot_op(cmnd),
-					dif_op_str[scsi_get_prot_op(cmnd)]);
-				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					"9039 BLKGRD: CDB: %02x %02x %02x "
-					"%02x %02x %02x %02x %02x %02x %02x\n",
-					cmnd->cmnd[0], cmnd->cmnd[1],
-					cmnd->cmnd[2], cmnd->cmnd[3],
-					cmnd->cmnd[4], cmnd->cmnd[5],
-					cmnd->cmnd[6], cmnd->cmnd[7],
-					cmnd->cmnd[8], cmnd->cmnd[9]);
+				"9038 BLKGRD: rcvd unprotected cmd:"
+				"%02x op=%s guard=%s\n", cmnd->cmnd[0],
+				dif_op_str[scsi_get_prot_op(cmnd)],
+				dif_grd_str[scsi_host_get_guard(shost)]);
 			if (cmnd->cmnd[0] == READ_10)
 				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 					"9040 dbg: READ @ sector %llu, "
-					"count %u\n",
+					"cnt %u, rpt %d\n",
 					(unsigned long long)scsi_get_lba(cmnd),
-					 blk_rq_sectors(cmnd->request));
+					 blk_rq_sectors(cmnd->request),
+					(cmnd->cmnd[1]>>5));
 			else if (cmnd->cmnd[0] == WRITE_10)
 				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					 "9041 dbg: WRITE @ sector %llu, "
-					 "count %u cmd=%p\n",
-					 (unsigned long long)scsi_get_lba(cmnd),
-					 blk_rq_sectors(cmnd->request), cmnd);
-			else
-				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					 "9042 dbg: parser not implemented\n");
+					"9041 dbg: WRITE @ sector %llu, "
+					"cnt %u, wpt %d\n",
+					(unsigned long long)scsi_get_lba(cmnd),
+					blk_rq_sectors(cmnd->request),
+					(cmnd->cmnd[1]>>5));
 		}
 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 	}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 9075a08..21a2ffe 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -150,9 +150,18 @@
 	struct lpfc_iocbq cur_iocbq;
 	wait_queue_head_t *waitq;
 	unsigned long start_time;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	/* Used to restore any changes to protection data for error injection */
+	void *prot_data_segment;
+	uint32_t prot_data;
+	uint32_t prot_data_type;
+#define	LPFC_INJERR_REFTAG	1
+#define	LPFC_INJERR_APPTAG	2
+#define	LPFC_INJERR_GUARD	3
+#endif
 };
 
 #define LPFC_SCSI_DMA_EXT_SIZE 264
 #define LPFC_BPL_SIZE          1024
-#define LPFC_RETRY_PAUSE       300
 #define MDAC_DIRECT_CMD                  0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e0e4d8d..dbaf5b9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -5578,8 +5578,6 @@
 		for (i = 0; i < count; i++)
 			phba->sli4_hba.rpi_ids[i] = base + i;
 
-		lpfc_sli4_node_prep(phba);
-
 		/* VPIs. */
 		count = phba->sli4_hba.max_cfg_param.max_vpi;
 		base = phba->sli4_hba.max_cfg_param.vpi_base;
@@ -5613,6 +5611,8 @@
 			rc = -ENOMEM;
 			goto free_vpi_ids;
 		}
+		phba->sli4_hba.max_cfg_param.xri_used = 0;
+		phba->sli4_hba.xri_count = 0;
 		phba->sli4_hba.xri_ids = kzalloc(count *
 						 sizeof(uint16_t),
 						 GFP_KERNEL);
@@ -6147,6 +6147,7 @@
 		rc = -ENODEV;
 		goto out_free_mbox;
 	}
+	lpfc_sli4_node_prep(phba);
 
 	/* Create all the SLI4 queues */
 	rc = lpfc_sli4_queue_create(phba);
@@ -7251,11 +7252,13 @@
 
 out_not_finished:
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
-	__lpfc_mbox_cmpl_put(phba, mboxq);
-	/* Release the token */
-	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-	phba->sli.mbox_active = NULL;
+	if (phba->sli.mbox_active) {
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		/* Release the token */
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+	}
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
 	return MBX_NOT_FINISHED;
@@ -7743,6 +7746,7 @@
 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
 				*pcmd == ELS_CMD_SCR ||
 				*pcmd == ELS_CMD_FDISC ||
+				*pcmd == ELS_CMD_LOGO ||
 				*pcmd == ELS_CMD_PLOGI)) {
 				bf_set(els_req64_sp, &wqe->els_req, 1);
 				bf_set(els_req64_sid, &wqe->els_req,
@@ -8385,6 +8389,7 @@
 			   struct sli4_wcqe_xri_aborted *axri)
 {
 	struct lpfc_vport *vport;
+	uint32_t ext_status = 0;
 
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -8396,12 +8401,20 @@
 	vport = ndlp->vport;
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"3116 Port generated FCP XRI ABORT event on "
-			"vpi %d rpi %d xri x%x status 0x%x\n",
+			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
 			ndlp->vport->vpi, ndlp->nlp_rpi,
 			bf_get(lpfc_wcqe_xa_xri, axri),
-			bf_get(lpfc_wcqe_xa_status, axri));
+			bf_get(lpfc_wcqe_xa_status, axri),
+			axri->parameter);
 
-	if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+	/*
+	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
+	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
+	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
+	 */
+	ext_status = axri->parameter & WCQE_PARAM_MASK;
+	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
+	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
 		lpfc_sli_abts_recover_port(vport, ndlp);
 }
 
@@ -9807,12 +9820,11 @@
 	unsigned long timeout;
 
 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
-	spin_unlock_irq(&phba->hbalock);
 
 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
-		spin_lock_irq(&phba->hbalock);
 		/* Determine how long we might wait for the active mailbox
 		 * command to be gracefully completed by firmware.
 		 */
@@ -9831,7 +9843,9 @@
 				 */
 				break;
 		}
-	}
+	} else
+		spin_unlock_irq(&phba->hbalock);
+
 	lpfc_sli_mbox_sys_flush(phba);
 }
 
@@ -13272,7 +13286,7 @@
 	LPFC_MBOXQ_t *mbox;
 	uint32_t reqlen, alloclen, index;
 	uint32_t mbox_tmo;
-	uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+	uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
 	uint16_t xritag_start = 0, lxri = 0;
 	struct lpfc_rsrc_blks *rsrc_blk;
 	int cnt, ttl_cnt, rc = 0;
@@ -13294,6 +13308,7 @@
 
 	cnt = 0;
 	ttl_cnt = 0;
+	post_els_xri_cnt = els_xri_cnt;
 	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
 			    list) {
 		rsrc_start = rsrc_blk->rsrc_start;
@@ -13303,11 +13318,12 @@
 				"3014 Working ELS Extent start %d, cnt %d\n",
 				rsrc_start, rsrc_size);
 
-		loop_cnt = min(els_xri_cnt, rsrc_size);
-		if (ttl_cnt + loop_cnt >= els_xri_cnt) {
-			loop_cnt = els_xri_cnt - ttl_cnt;
-			ttl_cnt = els_xri_cnt;
-		}
+		loop_cnt = min(post_els_xri_cnt, rsrc_size);
+		if (loop_cnt < post_els_xri_cnt) {
+			post_els_xri_cnt -= loop_cnt;
+			ttl_cnt += loop_cnt;
+		} else
+			ttl_cnt += post_els_xri_cnt;
 
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (!mbox)
@@ -14203,15 +14219,14 @@
 		 * field and RX_ID from ABTS for RX_ID field.
 		 */
 		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
-		bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
 	} else {
 		/* ABTS sent by initiator to CT exchange, construction
 		 * of BA_ACC will need to allocate a new XRI as for the
-		 * XRI_TAG and RX_ID fields.
+		 * XRI_TAG field.
 		 */
 		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
-		bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
 	}
+	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
 	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
 
 	/* Xmit CT abts response on exchange <xid> */
@@ -15042,6 +15057,7 @@
 	LPFC_MBOXQ_t *mboxq;
 
 	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
+	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f2a2602..25cefc2 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.29"
+#define LPFC_DRIVER_VERSION "8.3.30"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 5e69f46..8a59a77 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -657,7 +657,7 @@
 		return;
 
 	/* eat the loginfos associated with task aborts */
-	if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
+	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
 	    0x31140000 || log_info == 0x31130000))
 		return;
 
@@ -2060,12 +2060,10 @@
 {
 	int i = 0;
 	char desc[16];
-	u8 revision;
 	u32 iounit_pg1_flags;
 	u32 bios_version;
 
 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
-	pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
 	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
 	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
@@ -2074,7 +2072,7 @@
 	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
 	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
 	   ioc->facts.FWVersion.Word & 0x000000FF,
-	   revision,
+	   ioc->pdev->revision,
 	   (bios_version & 0xFF000000) >> 24,
 	   (bios_version & 0x00FF0000) >> 16,
 	   (bios_version & 0x0000FF00) >> 8,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 7fceb89..3b9a28e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1026,7 +1026,6 @@
 {
 	struct mpt2_ioctl_iocinfo karg;
 	struct MPT2SAS_ADAPTER *ioc;
-	u8 revision;
 
 	if (copy_from_user(&karg, arg, sizeof(karg))) {
 		printk(KERN_ERR "failure at %s:%d/%s()!\n",
@@ -1046,8 +1045,7 @@
 		karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
 	if (ioc->pfacts)
 		karg.port_number = ioc->pfacts[0].PortNumber;
-	pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
-	karg.hw_rev = revision;
+	karg.hw_rev = ioc->pdev->revision;
 	karg.pci_id = ioc->pdev->device;
 	karg.subsystem_device = ioc->pdev->subsystem_device;
 	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 3619f6e..9d82ee5 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2093,6 +2093,7 @@
 	struct ata_task_resp *resp ;
 	u32 *sata_resp;
 	struct pm8001_device *pm8001_dev;
+	unsigned long flags;
 
 	psataPayload = (struct sata_completion_resp *)(piomb + 4);
 	status = le32_to_cpu(psataPayload->status);
@@ -2382,26 +2383,26 @@
 		ts->stat = SAS_DEV_NO_RESPONSE;
 		break;
 	}
-	spin_lock_irq(&t->task_state_lock);
+	spin_lock_irqsave(&t->task_state_lock, flags);
 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-		spin_unlock_irq(&t->task_state_lock);
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else if (t->uldd_task) {
-		spin_unlock_irq(&t->task_state_lock);
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
 		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
 		spin_lock_irq(&pm8001_ha->lock);
 	} else if (!t->uldd_task) {
-		spin_unlock_irq(&t->task_state_lock);
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/*ditto*/
 		spin_unlock_irq(&pm8001_ha->lock);
@@ -2423,6 +2424,7 @@
 	u32 tag = le32_to_cpu(psataPayload->tag);
 	u32 port_id = le32_to_cpu(psataPayload->port_id);
 	u32 dev_id = le32_to_cpu(psataPayload->device_id);
+	unsigned long flags;
 
 	ccb = &pm8001_ha->ccb_info[tag];
 	t = ccb->task;
@@ -2593,26 +2595,26 @@
 		ts->stat = SAS_OPEN_TO;
 		break;
 	}
-	spin_lock_irq(&t->task_state_lock);
+	spin_lock_irqsave(&t->task_state_lock, flags);
 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-		spin_unlock_irq(&t->task_state_lock);
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else if (t->uldd_task) {
-		spin_unlock_irq(&t->task_state_lock);
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
 		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
 		spin_lock_irq(&pm8001_ha->lock);
 	} else if (!t->uldd_task) {
-		spin_unlock_irq(&t->task_state_lock);
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/*ditto*/
 		spin_unlock_irq(&pm8001_ha->lock);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 7c9f28b..fc542a9 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -431,9 +431,9 @@
 				  mbox_sts_entry->out_mbox[6]));
 
 		if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
-			status = QLA_SUCCESS;
+			status = ISCSI_PING_SUCCESS;
 		else
-			status = QLA_ERROR;
+			status = mbox_sts_entry->out_mbox[6];
 
 		data_size = sizeof(mbox_sts_entry->out_mbox);
 
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 3d94194..ee47820 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -834,7 +834,7 @@
 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
 {
 	struct scsi_qla_host *ha = to_qla_host(shost);
-	struct iscsi_cls_host *ihost = shost_priv(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
 
 	qla4xxx_get_firmware_state(ha);
@@ -859,7 +859,7 @@
 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
 {
 	struct scsi_qla_host *ha = to_qla_host(shost);
-	struct iscsi_cls_host *ihost = shost_priv(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	uint32_t state = ISCSI_PORT_STATE_DOWN;
 
 	if (test_bit(AF_LINK_UP, &ha->flags))
@@ -3445,7 +3445,6 @@
 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
 {
 	int status = 0;
-	uint8_t revision_id;
 	unsigned long mem_base, mem_len, db_base, db_len;
 	struct pci_dev *pdev = ha->pdev;
 
@@ -3457,10 +3456,9 @@
 		goto iospace_error_exit;
 	}
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
-	    __func__, revision_id));
-	ha->revision_id = revision_id;
+	    __func__, pdev->revision));
+	ha->revision_id = pdev->revision;
 
 	/* remap phys address */
 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index ede9af9..97b30c1 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k15"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k16"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5918561..182d5a5 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -101,6 +101,7 @@
 #define DEF_LBPU 0
 #define DEF_LBPWS 0
 #define DEF_LBPWS10 0
+#define DEF_LBPRZ 1
 #define DEF_LOWEST_ALIGNED 0
 #define DEF_NO_LUN_0   0
 #define DEF_NUM_PARTS   0
@@ -186,6 +187,7 @@
 static unsigned int scsi_debug_lbpu = DEF_LBPU;
 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
+static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
@@ -775,10 +777,10 @@
 	return 0x3c;
 }
 
-/* Thin provisioning VPD page (SBC-3) */
+/* Logical block provisioning VPD page (SBC-3) */
 static int inquiry_evpd_b2(unsigned char *arr)
 {
-	memset(arr, 0, 0x8);
+	memset(arr, 0, 0x4);
 	arr[0] = 0;			/* threshold exponent */
 
 	if (scsi_debug_lbpu)
@@ -790,7 +792,10 @@
 	if (scsi_debug_lbpws10)
 		arr[1] |= 1 << 5;
 
-	return 0x8;
+	if (scsi_debug_lbprz)
+		arr[1] |= 1 << 2;
+
+	return 0x4;
 }
 
 #define SDEBUG_LONG_INQ_SZ 96
@@ -1071,8 +1076,11 @@
 	arr[13] = scsi_debug_physblk_exp & 0xf;
 	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
 
-	if (scsi_debug_lbp())
+	if (scsi_debug_lbp()) {
 		arr[14] |= 0x80; /* LBPME */
+		if (scsi_debug_lbprz)
+			arr[14] |= 0x40; /* LBPRZ */
+	}
 
 	arr[15] = scsi_debug_lowest_aligned & 0xff;
 
@@ -2046,10 +2054,13 @@
 		block = lba + alignment;
 		rem = do_div(block, granularity);
 
-		if (rem == 0 && lba + granularity <= end &&
-		    block < map_size)
+		if (rem == 0 && lba + granularity <= end && block < map_size) {
 			clear_bit(block, map_storep);
-
+			if (scsi_debug_lbprz)
+				memset(fake_storep +
+				       block * scsi_debug_sector_size, 0,
+				       scsi_debug_sector_size);
+		}
 		lba += granularity - rem;
 	}
 }
@@ -2731,6 +2742,7 @@
 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
@@ -2772,6 +2784,7 @@
 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fac3173..1cf640e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1486,7 +1486,7 @@
 	struct iscsi_uevent *ev;
 	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
 
-	skb = alloc_skb(len, GFP_KERNEL);
+	skb = alloc_skb(len, GFP_NOIO);
 	if (!skb) {
 		printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
 		       host_no, code);
@@ -1504,7 +1504,7 @@
 	if (data_size)
 		memcpy((char *)ev + sizeof(*ev), data, data_size);
 
-	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
 }
 EXPORT_SYMBOL_GPL(iscsi_post_host_event);
 
@@ -1517,7 +1517,7 @@
 	struct iscsi_uevent *ev;
 	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
 
-	skb = alloc_skb(len, GFP_KERNEL);
+	skb = alloc_skb(len, GFP_NOIO);
 	if (!skb) {
 		printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
 		return;
@@ -1533,7 +1533,7 @@
 	ev->r.ping_comp.data_size = data_size;
 	memcpy((char *)ev + sizeof(*ev), data, data_size);
 
-	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
 }
 EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 09e3df4..5ba5c2a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -664,7 +664,7 @@
 }
 
 /**
- *	sd_init_command - build a scsi (read or write) command from
+ *	sd_prep_fn - build a scsi (read or write) command from
  *	information in the request structure.
  *	@SCpnt: pointer to mid-level's per scsi command structure that
  *	contains request and into which the scsi command is written
@@ -711,7 +711,7 @@
 	ret = BLKPREP_KILL;
 
 	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
-					"sd_init_command: block=%llu, "
+					"sd_prep_fn: block=%llu, "
 					"count=%d\n",
 					(unsigned long long)block,
 					this_count));
@@ -1212,9 +1212,14 @@
 	retval = -ENODEV;
 
 	if (scsi_block_when_processing_errors(sdp)) {
+		retval = scsi_autopm_get_device(sdp);
+		if (retval)
+			goto out;
+
 		sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
 		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
 					      sshdr);
+		scsi_autopm_put_device(sdp);
 	}
 
 	/* failed to execute TUR, assume media not present */
@@ -2644,8 +2649,8 @@
  *	(e.g. /dev/sda). More precisely it is the block device major 
  *	and minor number that is chosen here.
  *
- *	Assume sd_attach is not re-entrant (for time being)
- *	Also think about sd_attach() and sd_remove() running coincidentally.
+ *	Assume sd_probe is not re-entrant (for time being)
+ *	Also think about sd_probe() and sd_remove() running coincidentally.
  **/
 static int sd_probe(struct device *dev)
 {
@@ -2660,7 +2665,7 @@
 		goto out;
 
 	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
-					"sd_attach\n"));
+					"sd_probe\n"));
 
 	error = -ENOMEM;
 	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a15f691..e41998c 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1105,6 +1105,12 @@
                                     STp->drv_buffer));
 		}
 		STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
+		if (!STp->drv_buffer && STp->immediate_filemark) {
+			printk(KERN_WARNING
+			    "%s: non-buffered tape: disabling writing immediate filemarks\n",
+			    name);
+			STp->immediate_filemark = 0;
+		}
 	}
 	st_release_request(SRpnt);
 	SRpnt = NULL;
@@ -1313,6 +1319,8 @@
 
 		memset(cmd, 0, MAX_COMMAND_SIZE);
 		cmd[0] = WRITE_FILEMARKS;
+		if (STp->immediate_filemark)
+			cmd[1] = 1;
 		cmd[4] = 1 + STp->two_fm;
 
 		SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
@@ -2180,8 +2188,9 @@
 		       name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
 		       STp->scsi2_logical);
 		printk(KERN_INFO
-		       "%s:    sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate,
-			STp->sili);
+		       "%s:    sysv: %d nowait: %d sili: %d nowait_filemark: %d\n",
+		       name, STm->sysv, STp->immediate, STp->sili,
+		       STp->immediate_filemark);
 		printk(KERN_INFO "%s:    debugging: %d\n",
 		       name, debugging);
 	}
@@ -2223,6 +2232,7 @@
 			STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
 		STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
 		STp->immediate = (options & MT_ST_NOWAIT) != 0;
+		STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0;
 		STm->sysv = (options & MT_ST_SYSV) != 0;
 		STp->sili = (options & MT_ST_SILI) != 0;
 		DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
@@ -2254,6 +2264,8 @@
 			STp->scsi2_logical = value;
 		if ((options & MT_ST_NOWAIT) != 0)
 			STp->immediate = value;
+		if ((options & MT_ST_NOWAIT_EOF) != 0)
+			STp->immediate_filemark = value;
 		if ((options & MT_ST_SYSV) != 0)
 			STm->sysv = value;
 		if ((options & MT_ST_SILI) != 0)
@@ -2713,7 +2725,8 @@
 		cmd[0] = WRITE_FILEMARKS;
 		if (cmd_in == MTWSM)
 			cmd[1] = 2;
-		if (cmd_in == MTWEOFI)
+		if (cmd_in == MTWEOFI ||
+		    (cmd_in == MTWEOF && STp->immediate_filemark))
 			cmd[1] |= 1;
 		cmd[2] = (arg >> 16);
 		cmd[3] = (arg >> 8);
@@ -4092,6 +4105,7 @@
 	tpnt->scsi2_logical = ST_SCSI2LOGICAL;
 	tpnt->sili = ST_SILI;
 	tpnt->immediate = ST_NOWAIT;
+	tpnt->immediate_filemark = 0;
 	tpnt->default_drvbuffer = 0xff;		/* No forced buffering */
 	tpnt->partition = 0;
 	tpnt->new_partition = 0;
@@ -4477,6 +4491,7 @@
 	options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0;
 	options |= STm->sysv ? MT_ST_SYSV : 0;
 	options |= STp->immediate ? MT_ST_NOWAIT : 0;
+	options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0;
 	options |= STp->sili ? MT_ST_SILI : 0;
 
 	l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index f91a67c..ea35632 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -120,6 +120,7 @@
 	unsigned char c_algo;			/* compression algorithm */
 	unsigned char pos_unknown;			/* after reset position unknown */
 	unsigned char sili;			/* use SILI when reading in variable b mode */
+	unsigned char immediate_filemark;	/* write filemark immediately */
 	int tape_type;
 	int long_timeout;	/* timeout for commands known to take long time */
 
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
new file mode 100644
index 0000000..8f27f9d
--- /dev/null
+++ b/drivers/scsi/ufs/Kconfig
@@ -0,0 +1,49 @@
+#
+# Kernel configuration file for the UFS Host Controller
+#
+# This code is based on drivers/scsi/ufs/Kconfig
+# Copyright (C) 2011  Samsung India Software Operations
+#
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+# USA.
+
+config SCSI_UFSHCD
+	tristate "Universal Flash Storage host controller driver"
+	depends on PCI && SCSI
+	---help---
+	This is a generic driver which supports PCIe UFS Host controllers.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
new file mode 100644
index 0000000..adf7895
--- /dev/null
+++ b/drivers/scsi/ufs/Makefile
@@ -0,0 +1,2 @@
+# UFSHCD makefile
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
new file mode 100644
index 0000000..b207529
--- /dev/null
+++ b/drivers/scsi/ufs/ufs.h
@@ -0,0 +1,207 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufs.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#ifndef _UFS_H
+#define _UFS_H
+
+#define MAX_CDB_SIZE	16
+
+#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
+			((byte3 << 24) | (byte2 << 16) |\
+			 (byte1 << 8) | (byte0))
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+	UFS_ABORT_TASK		= 0x01,
+	UFS_ABORT_TASK_SET	= 0x02,
+	UFS_CLEAR_TASK_SET	= 0x04,
+	UFS_LOGICAL_RESET	= 0x08,
+	UFS_QUERY_TASK		= 0x80,
+	UFS_QUERY_TASK_SET	= 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+	UPIU_TRANSACTION_NOP_OUT	= 0x00,
+	UPIU_TRANSACTION_COMMAND	= 0x01,
+	UPIU_TRANSACTION_DATA_OUT	= 0x02,
+	UPIU_TRANSACTION_TASK_REQ	= 0x04,
+	UPIU_TRANSACTION_QUERY_REQ	= 0x26,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+	UPIU_TRANSACTION_NOP_IN		= 0x20,
+	UPIU_TRANSACTION_RESPONSE	= 0x21,
+	UPIU_TRANSACTION_DATA_IN	= 0x22,
+	UPIU_TRANSACTION_TASK_RSP	= 0x24,
+	UPIU_TRANSACTION_READY_XFER	= 0x31,
+	UPIU_TRANSACTION_QUERY_RSP	= 0x36,
+};
+
+/* UPIU Read/Write flags */
+enum {
+	UPIU_CMD_FLAGS_NONE	= 0x00,
+	UPIU_CMD_FLAGS_WRITE	= 0x20,
+	UPIU_CMD_FLAGS_READ	= 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+	UPIU_TASK_ATTR_SIMPLE	= 0x00,
+	UPIU_TASK_ATTR_ORDERED	= 0x01,
+	UPIU_TASK_ATTR_HEADQ	= 0x02,
+	UPIU_TASK_ATTR_ACA	= 0x03,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum {
+	UPIU_QUERY_OPCODE_NOP		= 0x0,
+	UPIU_QUERY_OPCODE_READ_DESC	= 0x1,
+	UPIU_QUERY_OPCODE_WRITE_DESC	= 0x2,
+	UPIU_QUERY_OPCODE_READ_ATTR	= 0x3,
+	UPIU_QUERY_OPCODE_WRITE_ATTR	= 0x4,
+	UPIU_QUERY_OPCODE_READ_FLAG	= 0x5,
+	UPIU_QUERY_OPCODE_SET_FLAG	= 0x6,
+	UPIU_QUERY_OPCODE_CLEAR_FLAG	= 0x7,
+	UPIU_QUERY_OPCODE_TOGGLE_FLAG	= 0x8,
+};
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+	UPIU_COMMAND_SET_TYPE_SCSI	= 0x0,
+	UPIU_COMMAND_SET_TYPE_UFS	= 0x1,
+	UPIU_COMMAND_SET_TYPE_QUERY	= 0x2,
+};
+
+enum {
+	MASK_SCSI_STATUS	= 0xFF,
+	MASK_TASK_RESPONSE	= 0xFF00,
+	MASK_RSP_UPIU_RESULT	= 0xFFFF,
+};
+
+/* Task management service response */
+enum {
+	UPIU_TASK_MANAGEMENT_FUNC_COMPL		= 0x00,
+	UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+	UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED	= 0x08,
+	UPIU_TASK_MANAGEMENT_FUNC_FAILED	= 0x05,
+	UPIU_INCORRECT_LOGICAL_UNIT_NO		= 0x09,
+};
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+	u32 dword_0;
+	u32 dword_1;
+	u32 dword_2;
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @data_transfer_len: Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+	struct utp_upiu_header header;
+	u32 exp_data_transfer_len;
+	u8 cdb[MAX_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_rsp - Response UPIU structure
+ * @header: UPIU header DW-0 to DW-2
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+struct utp_upiu_rsp {
+	struct utp_upiu_header header;
+	u32 residual_transfer_count;
+	u32 reserved[4];
+	u16 sense_data_len;
+	u8 sense_data[18];
+};
+
+/**
+ * struct utp_upiu_task_req - Task request UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @input_param1: Input parameter 1 DW-3
+ * @input_param2: Input parameter 2 DW-4
+ * @input_param3: Input parameter 3 DW-5
+ * @reserved: Reserved double words DW-6 to DW-7
+ */
+struct utp_upiu_task_req {
+	struct utp_upiu_header header;
+	u32 input_param1;
+	u32 input_param2;
+	u32 input_param3;
+	u32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_task_rsp - Task Management Response UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @output_param1: Output parameter 1 DW-3
+ * @output_param2: Output parameter 2 DW-4
+ * @reserved: Reserved double words DW-5 to DW-7
+ */
+struct utp_upiu_task_rsp {
+	struct utp_upiu_header header;
+	u32 output_param1;
+	u32 output_param2;
+	u32 reserved[3];
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
new file mode 100644
index 0000000..52b96e8
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -0,0 +1,1978 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.1"
+
+enum {
+	UFSHCD_MAX_CHANNEL	= 0,
+	UFSHCD_MAX_ID		= 1,
+	UFSHCD_MAX_LUNS		= 8,
+	UFSHCD_CMD_PER_LUN	= 32,
+	UFSHCD_CAN_QUEUE	= 32,
+};
+
+/* UFSHCD states */
+enum {
+	UFSHCD_STATE_OPERATIONAL,
+	UFSHCD_STATE_RESET,
+	UFSHCD_STATE_ERROR,
+};
+
+/* Interrupt configuration options */
+enum {
+	UFSHCD_INT_DISABLE,
+	UFSHCD_INT_ENABLE,
+	UFSHCD_INT_CLEAR,
+};
+
+/* Interrupt aggregation options */
+enum {
+	INT_AGGR_RESET,
+	INT_AGGR_CONFIG,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+	u32 command;
+	u32 argument1;
+	u32 argument2;
+	u32 argument3;
+	int cmd_active;
+	int result;
+};
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @pdev: PCI device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+	void __iomem *mmio_base;
+
+	/* Virtual memory reference */
+	struct utp_transfer_cmd_desc *ucdl_base_addr;
+	struct utp_transfer_req_desc *utrdl_base_addr;
+	struct utp_task_req_desc *utmrdl_base_addr;
+
+	/* DMA memory reference */
+	dma_addr_t ucdl_dma_addr;
+	dma_addr_t utrdl_dma_addr;
+	dma_addr_t utmrdl_dma_addr;
+
+	struct Scsi_Host *host;
+	struct pci_dev *pdev;
+
+	struct ufshcd_lrb *lrb;
+
+	unsigned long outstanding_tasks;
+	unsigned long outstanding_reqs;
+
+	u32 capabilities;
+	int nutrs;
+	int nutmrs;
+	u32 ufs_version;
+
+	struct uic_command active_uic_cmd;
+	wait_queue_head_t ufshcd_tm_wait_queue;
+	unsigned long tm_condition;
+
+	u32 ufshcd_state;
+	u32 int_enable_mask;
+
+	/* Work Queues */
+	struct work_struct uic_workq;
+	struct work_struct feh_workq;
+
+	/* HBA Errors */
+	u32 errors;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+	struct utp_transfer_req_desc *utr_descriptor_ptr;
+	struct utp_upiu_cmd *ucd_cmd_ptr;
+	struct utp_upiu_rsp *ucd_rsp_ptr;
+	struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+	struct scsi_cmnd *cmd;
+	u8 *sense_buffer;
+	unsigned int sense_bufflen;
+	int scsi_status;
+
+	int command_type;
+	int task_tag;
+	unsigned int lun;
+};
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba: Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+	return readl(hba->mmio_base + REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device is connected to
+ *			      the host controller
+ * @reg_hcs: host controller status register value
+ *
+ * Returns 0 if device present, non-zero if no device detected
+ */
+static inline int ufshcd_is_device_present(u32 reg_hcs)
+{
+	return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrbp: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+	return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+	return task_req_descp->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ *
+ * Returns the free slot number, or the maximum number of task management
+ * request slots if the task management queue is full
+ */
+static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
+{
+	return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
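+	/*
+	 * UTRLCLR clears the slot whose bit is written as 0; keeping all
+	 * other bits at 1 leaves the remaining slots untouched.
+	 */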
+	writel(~(1 << pos),
+		(hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR));
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns 0 on success and a positive value on failure
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+	/*
+	 * The mask 0xFF is for the following HCS register bits
+	 * Bit		Description
+	 *  0		Device Present
+	 *  1		UTRLRDY
+	 *  2		UTMRLRDY
+	 *  3		UCRDY
+	 *  4		HEI
+	 *  5		DEI
+	 * 6-7		reserved
+	 */
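+	/*
+	 * Drop the Device Present bit, then XOR against the three ready
+	 * bits: the result is 0 only when UTRLRDY, UTMRLRDY and UCRDY are
+	 * all set and the error/reserved bits are all clear.
+	 */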
+	return (((reg) & (0xFF)) >> 1) ^ (0x07);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+	return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) &
+	       MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_free_hba_memory - Free allocated memory for LRB, request
+ *			    and task lists
+ * @hba: Pointer to adapter instance
+ */
+static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
+{
+	size_t utmrdl_size, utrdl_size, ucdl_size;
+
+	kfree(hba->lrb);
+
+	if (hba->utmrdl_base_addr) {
+		utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+		dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+				  hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
+	}
+
+	if (hba->utrdl_base_addr) {
+		utrdl_size =
+		(sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+		dma_free_coherent(&hba->pdev->dev, utrdl_size,
+				  hba->utrdl_base_addr, hba->utrdl_dma_addr);
+	}
+
+	if (hba->ucdl_base_addr) {
+		ucdl_size =
+		(sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+		dma_free_coherent(&hba->pdev->dev, ucdl_size,
+				  hba->ucdl_base_addr, hba->ucdl_dma_addr);
+	}
+}
+
+/**
+ * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function checks the response UPIU for valid transaction type in
+ * response field
+ * Returns 0 on success, non-zero on failure
+ */
+static inline int
+ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
+		 UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/**
+ * ufshcd_config_int_aggr - Configure interrupt aggregation values.
+ *		Currently there is no use case where interrupt aggregation
+ *		needs to be configured dynamically, so the compile-time
+ *		#defines INT_AGGR_COUNTER_THRESHOLD_VALUE and
+ *		INT_AGGR_TIMEOUT_VALUE are used instead.
+ * @hba: per adapter instance
+ * @option: Interrupt aggregation option
+ */
+static inline void
+ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+{
+	switch (option) {
+	case INT_AGGR_RESET:
+		writel((INT_AGGR_ENABLE |
+			INT_AGGR_COUNTER_AND_TIMER_RESET),
+			(hba->mmio_base +
+			 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+		break;
+	case INT_AGGR_CONFIG:
+		writel((INT_AGGR_ENABLE |
+			INT_AGGR_PARAM_WRITE |
+			INT_AGGR_COUNTER_THRESHOLD_VALUE |
+			INT_AGGR_TIMEOUT_VALUE),
+			(hba->mmio_base +
+			 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+		break;
+	}
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers.
+ *			When the run-stop registers are set to 1, the host
+ *			controller is allowed to process requests
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+	writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+	       (hba->mmio_base +
+		REG_UTP_TASK_REQ_LIST_RUN_STOP));
+	writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+	       (hba->mmio_base +
+		REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+	writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+	writel(CONTROLLER_ENABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns zero if controller is active, 1 otherwise
+ */
+static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+	return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+}
+
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+	__set_bit(task_tag, &hba->outstanding_reqs);
+	writel((1 << task_tag),
+	       (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL));
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrbp: pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+	int len;
+	if (lrbp->sense_buffer) {
+		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
+		memcpy(lrbp->sense_buffer,
+			lrbp->ucd_rsp_ptr->sense_data,
+			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+	}
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+	hba->capabilities =
+		readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES);
+
+	/* nutrs and nutmrs are 0 based values */
+	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+	hba->nutmrs =
+	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
+
+/**
+ * ufshcd_send_uic_command - Send UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_cmnd: UIC command
+ */
+static inline void
+ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
+{
+	/* Write Args */
+	writel(uic_cmnd->argument1,
+	      (hba->mmio_base + REG_UIC_COMMAND_ARG_1));
+	writel(uic_cmnd->argument2,
+	      (hba->mmio_base + REG_UIC_COMMAND_ARG_2));
+	writel(uic_cmnd->argument3,
+	      (hba->mmio_base + REG_UIC_COMMAND_ARG_3));
+
+	/* Write UIC Cmd */
+	writel((uic_cmnd->command & COMMAND_OPCODE_MASK),
+	       (hba->mmio_base + REG_UIC_COMMAND));
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+{
+	struct ufshcd_sg_entry *prd_table;
+	struct scatterlist *sg;
+	struct scsi_cmnd *cmd;
+	int sg_segments;
+	int i;
+
+	cmd = lrbp->cmd;
+	sg_segments = scsi_dma_map(cmd);
+	if (sg_segments < 0)
+		return sg_segments;
+
+	if (sg_segments) {
+		lrbp->utr_descriptor_ptr->prd_table_length =
+					cpu_to_le16((u16) (sg_segments));
+
+		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
+		scsi_for_each_sg(cmd, sg, sg_segments, i) {
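+			/* PRD size field holds the data byte count minus 1 */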
+			prd_table[i].size  =
+				cpu_to_le32(((u32) sg_dma_len(sg))-1);
+			prd_table[i].base_addr =
+				cpu_to_le32(lower_32_bits(sg->dma_address));
+			prd_table[i].upper_addr =
+				cpu_to_le32(upper_32_bits(sg->dma_address));
+		}
+	} else {
+		lrbp->utr_descriptor_ptr->prd_table_length = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * ufshcd_int_config - enable/disable interrupts
+ * @hba: per adapter instance
+ * @option: interrupt option
+ */
+static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
+{
+	switch (option) {
+	case UFSHCD_INT_ENABLE:
+		writel(hba->int_enable_mask,
+		      (hba->mmio_base + REG_INTERRUPT_ENABLE));
+		break;
+	case UFSHCD_INT_DISABLE:
+		if (hba->ufs_version == UFSHCI_VERSION_10)
+			writel(INTERRUPT_DISABLE_MASK_10,
+			      (hba->mmio_base + REG_INTERRUPT_ENABLE));
+		else
+			writel(INTERRUPT_DISABLE_MASK_11,
+			       (hba->mmio_base + REG_INTERRUPT_ENABLE));
+		break;
+	}
+}
+
+/**
+ * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * @lrbp: pointer to local reference block
+ */
+static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
+{
+	struct utp_transfer_req_desc *req_desc;
+	struct utp_upiu_cmd *ucd_cmd_ptr;
+	u32 data_direction;
+	u32 upiu_flags;
+
+	ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
+	req_desc = lrbp->utr_descriptor_ptr;
+
+	switch (lrbp->command_type) {
+	case UTP_CMD_TYPE_SCSI:
+		if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
+			data_direction = UTP_DEVICE_TO_HOST;
+			upiu_flags = UPIU_CMD_FLAGS_READ;
+		} else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
+			data_direction = UTP_HOST_TO_DEVICE;
+			upiu_flags = UPIU_CMD_FLAGS_WRITE;
+		} else {
+			data_direction = UTP_NO_DATA_TRANSFER;
+			upiu_flags = UPIU_CMD_FLAGS_NONE;
+		}
+
+		/* Transfer request descriptor header fields */
+		req_desc->header.dword_0 =
+			cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
+
+		/*
+		 * assigning invalid value for command status. Controller
+		 * updates OCS on command completion, with the command
+		 * status
+		 */
+		req_desc->header.dword_2 =
+			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+		/* command descriptor fields */
+		ucd_cmd_ptr->header.dword_0 =
+			cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
+						      upiu_flags,
+						      lrbp->lun,
+						      lrbp->task_tag));
+		ucd_cmd_ptr->header.dword_1 =
+			cpu_to_be32(
+				UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
+						  0,
+						  0,
+						  0));
+
+		/* Total EHS length and Data segment length will be zero */
+		ucd_cmd_ptr->header.dword_2 = 0;
+
+		ucd_cmd_ptr->exp_data_transfer_len =
+			cpu_to_be32(lrbp->cmd->transfersize);
+
+		memcpy(ucd_cmd_ptr->cdb,
+		       lrbp->cmd->cmnd,
+		       (min_t(unsigned short,
+			      lrbp->cmd->cmd_len,
+			      MAX_CDB_SIZE)));
+		break;
+	case UTP_CMD_TYPE_DEV_MANAGE:
+		/* For query function implementation */
+		break;
+	case UTP_CMD_TYPE_UFS:
+		/* For UFS native command implementation */
+		break;
+	} /* end of switch */
+}
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @host: SCSI host pointer
+ * @cmd: command from SCSI Midlayer
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+	struct ufshcd_lrb *lrbp;
+	struct ufs_hba *hba;
+	unsigned long flags;
+	int tag;
+	int err = 0;
+
+	hba = shost_priv(host);
+
+	tag = cmd->request->tag;
+
+	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	lrbp = &hba->lrb[tag];
+
+	lrbp->cmd = cmd;
+	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+	lrbp->sense_buffer = cmd->sense_buffer;
+	lrbp->task_tag = tag;
+	lrbp->lun = cmd->device->lun;
+
+	lrbp->command_type = UTP_CMD_TYPE_SCSI;
+
+	/* form UPIU before issuing the command */
+	ufshcd_compose_upiu(lrbp);
+	err = ufshcd_map_sg(lrbp);
+	if (err)
+		goto out;
+
+	/* issue command to the controller */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_send_command(hba, tag);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ *	(UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+	size_t utmrdl_size, utrdl_size, ucdl_size;
+
+	/* Allocate memory for UTP command descriptors */
+	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+	hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+						 ucdl_size,
+						 &hba->ucdl_dma_addr,
+						 GFP_KERNEL);
+
+	/*
+	 * UFSHCI requires the UTP command descriptor to be 128 byte aligned.
+	 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is
+	 * aligned to PAGE_SIZE, then it is aligned to 128 bytes as well.
+	 */
+	if (!hba->ucdl_base_addr ||
+	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+		dev_err(&hba->pdev->dev,
+			"Command Descriptor Memory allocation failed\n");
+		goto out;
+	}
+
+	/*
+	 * Allocate memory for UTP Transfer descriptors
+	 * UFSHCI requires 1024 byte alignment of UTRD
+	 */
+	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+	hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+						  utrdl_size,
+						  &hba->utrdl_dma_addr,
+						  GFP_KERNEL);
+	if (!hba->utrdl_base_addr ||
+	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+		dev_err(&hba->pdev->dev,
+			"Transfer Descriptor Memory allocation failed\n");
+		goto out;
+	}
+
+	/*
+	 * Allocate memory for UTP Task Management descriptors
+	 * UFSHCI requires 1024 byte alignment of UTMRD
+	 */
+	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+	hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+						   utmrdl_size,
+						   &hba->utmrdl_dma_addr,
+						   GFP_KERNEL);
+	if (!hba->utmrdl_base_addr ||
+	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+		dev_err(&hba->pdev->dev,
+		"Task Management Descriptor Memory allocation failed\n");
+		goto out;
+	}
+
+	/* Allocate memory for local reference block */
+	hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
+	if (!hba->lrb) {
+		dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+		goto out;
+	}
+	return 0;
+out:
+	ufshcd_free_hba_memory(hba);
+	return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ *				memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+	struct utp_transfer_cmd_desc *cmd_descp;
+	struct utp_transfer_req_desc *utrdlp;
+	dma_addr_t cmd_desc_dma_addr;
+	dma_addr_t cmd_desc_element_addr;
+	u16 response_offset;
+	u16 prdt_offset;
+	int cmd_desc_size;
+	int i;
+
+	utrdlp = hba->utrdl_base_addr;
+	cmd_descp = hba->ucdl_base_addr;
+
+	response_offset =
+		offsetof(struct utp_transfer_cmd_desc, response_upiu);
+	prdt_offset =
+		offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+	cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+	for (i = 0; i < hba->nutrs; i++) {
+		/* Configure UTRD with command descriptor base address */
+		cmd_desc_element_addr =
+				(cmd_desc_dma_addr + (cmd_desc_size * i));
+		utrdlp[i].command_desc_base_addr_lo =
+				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+		utrdlp[i].command_desc_base_addr_hi =
+				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+		/* Response upiu and prdt offset should be in double words */
+		utrdlp[i].response_upiu_offset =
+				cpu_to_le16((response_offset >> 2));
+		utrdlp[i].prd_table_offset =
+				cpu_to_le16((prdt_offset >> 2));
+		utrdlp[i].response_upiu_length =
+				cpu_to_le16(ALIGNED_UPIU_SIZE);
+
+		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+		hba->lrb[i].ucd_cmd_ptr =
+			(struct utp_upiu_cmd *)(cmd_descp + i);
+		hba->lrb[i].ucd_rsp_ptr =
+			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+		hba->lrb[i].ucd_prdt_ptr =
+			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+	}
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * The UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro layer
+ * in order to initiate the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+	struct uic_command *uic_cmd;
+	unsigned long flags;
+
+	/* check if controller is ready to accept UIC commands */
+	if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
+	    UIC_COMMAND_READY) == 0x0) {
+		dev_err(&hba->pdev->dev,
+			"Controller not ready to accept UIC commands\n");
+		return -EIO;
+	}
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	/* form UIC command */
+	uic_cmd = &hba->active_uic_cmd;
+	uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
+	uic_cmd->argument1 = 0;
+	uic_cmd->argument2 = 0;
+	uic_cmd->argument3 = 0;
+
+	/* enable UIC related interrupts */
+	hba->int_enable_mask |= UIC_COMMAND_COMPL;
+	ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+	/* sending UIC commands to controller */
+	ufshcd_send_uic_command(hba, uic_cmd);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return 0;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Check if device is present
+ * 2. Configure run-stop-registers
+ * 3. Enable required interrupts
+ * 4. Configure interrupt aggregation
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+	int err = 0;
+	u32 reg;
+
+	/* check if device present */
+	reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
+	if (ufshcd_is_device_present(reg)) {
+		dev_err(&hba->pdev->dev, "cc: Device not present\n");
+		err = -ENXIO;
+		goto out;
+	}
+
+	/*
+	 * UCRDY, UTMRLRDY and UTRLRDY bits must be 1
+	 * DEI, HEI bits must be 0
+	 */
+	if (!(ufshcd_get_lists_status(reg))) {
+		ufshcd_enable_run_stop_reg(hba);
+	} else {
+		dev_err(&hba->pdev->dev,
+			"Host controller not ready to process requests");
+		err = -EIO;
+		goto out;
+	}
+
+	/* Enable required interrupts */
+	hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
+				 UIC_ERROR |
+				 UTP_TASK_REQ_COMPL |
+				 DEVICE_FATAL_ERROR |
+				 CONTROLLER_FATAL_ERROR |
+				 SYSTEM_BUS_FATAL_ERROR);
+	ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+	/* Configure interrupt aggregation */
+	ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+
+	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+		scsi_unblock_requests(hba->host);
+
+	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+	scsi_scan_host(hba->host);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_hba_enable - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and the controller firmware initialization
+ * sequence kicks off. When the controller is ready it sets the
+ * Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+	int retry;
+
+	/*
+	 * The msleep(1) and msleep(5) calls used in this function might
+	 * result in sleeping for up to 20 ms, but they were necessary to
+	 * send the UFS FPGA to reset mode during development and testing of
+	 * this driver. msleep can be changed to mdelay and the retry count
+	 * can be reduced based on the controller.
+	 */
+	if (!ufshcd_is_hba_active(hba)) {
+
+		/* change controller state to "reset state" */
+		ufshcd_hba_stop(hba);
+
+		/*
+		 * This delay is based on the testing done with UFS host
+		 * controller FPGA. The delay can be changed based on the
+		 * host controller used.
+		 */
+		msleep(5);
+	}
+
+	/* start controller initialization sequence */
+	ufshcd_hba_start(hba);
+
+	/*
+	 * To initialize a UFS host controller HCE bit must be set to 1.
+	 * During initialization the HCE bit value changes from 1->0->1.
+	 * When the host controller completes the initialization sequence it
+	 * sets the HCE bit back to 1. The same HCE bit is read back to check
+	 * whether the controller has completed initialization. Without this
+	 * delay, the HCE = 1 value written by the previous instruction might
+	 * be read back before the controller has actually started.
+	 * This delay can be changed based on the controller.
+	 */
+	msleep(1);
+
+	/* wait for the host controller to complete initialization */
+	retry = 10;
+	while (ufshcd_is_hba_active(hba)) {
+		if (retry) {
+			retry--;
+		} else {
+			dev_err(&hba->pdev->dev,
+				"Controller enable failed\n");
+			return -EIO;
+		}
+		msleep(5);
+	}
+	return 0;
+}
+
+/**
+ * ufshcd_initialize_hba - start the initialization process
+ * @hba: per adapter instance
+ *
+ * 1. Enable the controller via ufshcd_hba_enable.
+ * 2. Program the Transfer Request List Address with the starting address of
+ * UTRDL.
+ * 3. Program the Task Management Request List Address with starting address
+ * of UTMRDL.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int ufshcd_initialize_hba(struct ufs_hba *hba)
+{
+	if (ufshcd_hba_enable(hba))
+		return -EIO;
+
+	/* Configure UTRL and UTMRL base address registers */
+	writel(lower_32_bits(hba->utrdl_dma_addr),
+	       (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
+	writel(upper_32_bits(hba->utrdl_dma_addr),
+	       (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
+	writel(lower_32_bits(hba->utmrdl_dma_addr),
+	       (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
+	writel(upper_32_bits(hba->utmrdl_dma_addr),
+	       (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
+
+	/* Initialize unipro link startup procedure */
+	return ufshcd_dme_link_startup(hba);
+}
+
+/**
+ * ufshcd_do_reset - reset the host controller
+ * @hba: per adapter instance
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_do_reset(struct ufs_hba *hba)
+{
+	struct ufshcd_lrb *lrbp;
+	unsigned long flags;
+	int tag;
+
+	/* block commands from midlayer */
+	scsi_block_requests(hba->host);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+	/* send controller to reset state */
+	ufshcd_hba_stop(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* abort outstanding commands */
+	for (tag = 0; tag < hba->nutrs; tag++) {
+		if (test_bit(tag, &hba->outstanding_reqs)) {
+			lrbp = &hba->lrb[tag];
+			scsi_dma_unmap(lrbp->cmd);
+			lrbp->cmd->result = DID_RESET << 16;
+			lrbp->cmd->scsi_done(lrbp->cmd);
+			lrbp->cmd = NULL;
+		}
+	}
+
+	/* clear outstanding request/task bit maps */
+	hba->outstanding_reqs = 0;
+	hba->outstanding_tasks = 0;
+
+	/* start the initialization process */
+	if (ufshcd_initialize_hba(hba)) {
+		dev_err(&hba->pdev->dev,
+			"Reset: Controller initialization failed\n");
+		return FAILED;
+	}
+	return SUCCESS;
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Always returns 0 (success)
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+	struct ufs_hba *hba;
+
+	hba = shost_priv(sdev->host);
+	sdev->tagged_supported = 1;
+
+	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+	sdev->use_10_for_ms = 1;
+	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+
+	/*
+	 * Inform SCSI Midlayer that the LUN queue depth is same as the
+	 * controller queue depth. If a LUN queue depth is less than the
+	 * controller queue depth and if the LUN reports
+	 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
+	 * with scsi_adjust_queue_depth.
+	 */
+	scsi_activate_tcq(sdev, hba->nutrs);
+	return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+	struct ufs_hba *hba;
+
+	hba = shost_priv(sdev->host);
+	scsi_deactivate_tcq(sdev, hba->nutrs);
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
+{
+	struct utp_task_req_desc *task_req_descp;
+	struct utp_upiu_task_rsp *task_rsp_upiup;
+	unsigned long flags;
+	int ocs_value;
+	int task_result;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	/* Clear completed tasks from outstanding_tasks */
+	__clear_bit(index, &hba->outstanding_tasks);
+
+	task_req_descp = hba->utmrdl_base_addr;
+	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+	if (ocs_value == OCS_SUCCESS) {
+		task_rsp_upiup = (struct utp_upiu_task_rsp *)
+				task_req_descp[index].task_rsp_upiu;
+		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
+		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
+
+		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
+		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
+			task_result = FAILED;
+	} else {
+		task_result = FAILED;
+		dev_err(&hba->pdev->dev,
+			"trc: Invalid ocs = %x\n", ocs_value);
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return task_result;
+}
+
+/**
+ * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
+ *			      SAM_STAT_TASK_SET_FULL SCSI command status.
+ * @cmd: pointer to SCSI command
+ */
+static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
+{
+	struct ufs_hba *hba;
+	int i;
+	int lun_qdepth = 0;
+
+	hba = shost_priv(cmd->device->host);
+
+	/*
+	 * LUN queue depth can be obtained by counting outstanding commands
+	 * on the LUN.
+	 */
+	for (i = 0; i < hba->nutrs; i++) {
+		if (test_bit(i, &hba->outstanding_reqs)) {
+
+			/*
+			 * Check if the outstanding command belongs
+			 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
+			 */
+			if (cmd->device->lun == hba->lrb[i].lun)
+				lun_qdepth++;
+		}
+	}
+
+	/*
+	 * LUN queue depth will be total outstanding commands, except the
+	 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
+	 */
+	scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
+}
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrbp: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns value based on SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+	int result = 0;
+
+	switch (scsi_status) {
+	case SAM_STAT_GOOD:
+		result |= DID_OK << 16 |
+			  COMMAND_COMPLETE << 8 |
+			  SAM_STAT_GOOD;
+		break;
+	case SAM_STAT_CHECK_CONDITION:
+		result |= DID_OK << 16 |
+			  COMMAND_COMPLETE << 8 |
+			  SAM_STAT_CHECK_CONDITION;
+		ufshcd_copy_sense_data(lrbp);
+		break;
+	case SAM_STAT_BUSY:
+		result |= SAM_STAT_BUSY;
+		break;
+	case SAM_STAT_TASK_SET_FULL:
+
+		/*
+		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
+		 * depth needs to be adjusted to the exact number of
+		 * outstanding commands the LUN can handle at any given time.
+		 */
+		ufshcd_adjust_lun_qdepth(lrbp->cmd);
+		result |= SAM_STAT_TASK_SET_FULL;
+		break;
+	case SAM_STAT_TASK_ABORTED:
+		result |= SAM_STAT_TASK_ABORTED;
+		break;
+	default:
+		result |= DID_ERROR << 16;
+		break;
+	} /* end of switch */
+
+	return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	int result = 0;
+	int scsi_status;
+	int ocs;
+
+	/* overall command status of utrd */
+	ocs = ufshcd_get_tr_ocs(lrbp);
+
+	switch (ocs) {
+	case OCS_SUCCESS:
+
+		/* check if the returned transfer response is valid */
+		result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
+		if (result) {
+			dev_err(&hba->pdev->dev,
+				"Invalid response = %x\n", result);
+			break;
+		}
+
+		/*
+		 * get the response UPIU result to extract
+		 * the SCSI command status
+		 */
+		result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+		/*
+		 * get the result based on SCSI status response
+		 * to notify the SCSI midlayer of the command status
+		 */
+		scsi_status = result & MASK_SCSI_STATUS;
+		result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+		break;
+	case OCS_ABORTED:
+		result |= DID_ABORT << 16;
+		break;
+	case OCS_INVALID_CMD_TABLE_ATTR:
+	case OCS_INVALID_PRDT_ATTR:
+	case OCS_MISMATCH_DATA_BUF_SIZE:
+	case OCS_MISMATCH_RESP_UPIU_SIZE:
+	case OCS_PEER_COMM_FAILURE:
+	case OCS_FATAL_ERROR:
+	default:
+		result |= DID_ERROR << 16;
+		dev_err(&hba->pdev->dev,
+		"OCS error from controller = %x\n", ocs);
+		break;
+	} /* end of switch */
+
+	return result;
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+	struct ufshcd_lrb *lrb;
+	unsigned long completed_reqs;
+	u32 tr_doorbell;
+	int result;
+	int index;
+
+	lrb = hba->lrb;
+	tr_doorbell =
+		readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL);
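+	/*
+	 * Bits that are set in outstanding_reqs but already cleared in the
+	 * doorbell register correspond to completed transfer requests.
+	 */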
+	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+	for (index = 0; index < hba->nutrs; index++) {
+		if (test_bit(index, &completed_reqs)) {
+
+			result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
+
+			if (lrb[index].cmd) {
+				scsi_dma_unmap(lrb[index].cmd);
+				lrb[index].cmd->result = result;
+				lrb[index].cmd->scsi_done(lrb[index].cmd);
+
+				/* Mark completed command as NULL in LRB */
+				lrb[index].cmd = NULL;
+			}
+		} /* end of if */
+	} /* end of for */
+
+	/* clear corresponding bits of completed commands */
+	hba->outstanding_reqs ^= completed_reqs;
+
+	/* Reset interrupt aggregation counters */
+	ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+}
+
+/**
+ * ufshcd_uic_cc_handler - handle UIC command completion
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_uic_cc_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+
+	hba = container_of(work, struct ufs_hba, uic_workq);
+
+	if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
+	    !(ufshcd_get_uic_cmd_result(hba))) {
+
+		if (ufshcd_make_hba_operational(hba))
+			dev_err(&hba->pdev->dev,
+				"cc: hba not operational state\n");
+		return;
+	}
+}
+
+/**
+ * ufshcd_fatal_err_handler - handle fatal errors
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_fatal_err_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+	hba = container_of(work, struct ufs_hba, feh_workq);
+
+	/* check if reset is already in progress */
+	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
+		ufshcd_do_reset(hba);
+}
+
+/**
+ * ufshcd_err_handler - Check for fatal errors
+ * @hba: per adapter instance
+ */
+static void ufshcd_err_handler(struct ufs_hba *hba)
+{
+	u32 reg;
+
+	if (hba->errors & INT_FATAL_ERRORS)
+		goto fatal_eh;
+
+	if (hba->errors & UIC_ERROR) {
+
+		reg = readl(hba->mmio_base +
+			    REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+			goto fatal_eh;
+	}
+	return;
+fatal_eh:
+	hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	schedule_work(&hba->feh_workq);
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+	u32 tm_doorbell;
+
+	tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL);
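+	/* Task slots whose doorbell bits have cleared are now completed */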
+	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+	wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+	hba->errors = UFSHCD_ERROR_MASK & intr_status;
+	if (hba->errors)
+		ufshcd_err_handler(hba);
+
+	if (intr_status & UIC_COMMAND_COMPL)
+		schedule_work(&hba->uic_workq);
+
+	if (intr_status & UTP_TASK_REQ_COMPL)
+		ufshcd_tmc_handler(hba);
+
+	if (intr_status & UTP_TRANSFER_REQ_COMPL)
+		ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED if a valid interrupt was handled, IRQ_NONE otherwise
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+	u32 intr_status;
+	irqreturn_t retval = IRQ_NONE;
+	struct ufs_hba *hba = __hba;
+
+	spin_lock(hba->host->host_lock);
+	intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS);
+
+	if (intr_status) {
+		ufshcd_sl_intr(hba, intr_status);
+
+		/* If UFSHCI 1.0 then clear interrupt status register */
+		if (hba->ufs_version == UFSHCI_VERSION_10)
+			writel(intr_status,
+			       (hba->mmio_base + REG_INTERRUPT_STATUS));
+		retval = IRQ_HANDLED;
+	}
+	spin_unlock(hba->host->host_lock);
+	return retval;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ * @tm_function: task management function to be issued
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int
+ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+		    struct ufshcd_lrb *lrbp,
+		    u8 tm_function)
+{
+	struct utp_task_req_desc *task_req_descp;
+	struct utp_upiu_task_req *task_req_upiup;
+	struct Scsi_Host *host;
+	unsigned long flags;
+	int free_slot = 0;
+	int err;
+
+	host = hba->host;
+
+	spin_lock_irqsave(host->host_lock, flags);
+
+	/* If task management queue is full */
+	free_slot = ufshcd_get_tm_free_slot(hba);
+	if (free_slot >= hba->nutmrs) {
+		spin_unlock_irqrestore(host->host_lock, flags);
+		dev_err(&hba->pdev->dev, "Task management queue full\n");
+		err = FAILED;
+		goto out;
+	}
+
+	task_req_descp = hba->utmrdl_base_addr;
+	task_req_descp += free_slot;
+
+	/* Configure task request descriptor */
+	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+	task_req_descp->header.dword_2 =
+			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+	/* Configure task request UPIU */
+	task_req_upiup =
+		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+	task_req_upiup->header.dword_0 =
+		cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+					      lrbp->lun, lrbp->task_tag));
+	task_req_upiup->header.dword_1 =
+	cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+
+	task_req_upiup->input_param1 = lrbp->lun;
+	task_req_upiup->input_param1 =
+		cpu_to_be32(task_req_upiup->input_param1);
+	task_req_upiup->input_param2 = lrbp->task_tag;
+	task_req_upiup->input_param2 =
+		cpu_to_be32(task_req_upiup->input_param2);
+
+	/* send command to the controller */
+	__set_bit(free_slot, &hba->outstanding_tasks);
+	writel((1 << free_slot),
+	       (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
+
+	spin_unlock_irqrestore(host->host_lock, flags);
+
+	/* wait until the task management command is completed */
+	err =
+	wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
+					 (test_bit(free_slot,
+					 &hba->tm_condition) != 0),
+					 60 * HZ);
+	if (!err) {
+		dev_err(&hba->pdev->dev,
+			"Task management command timed-out\n");
+		err = FAILED;
+		goto out;
+	}
+	clear_bit(free_slot, &hba->tm_condition);
+	return ufshcd_task_req_compl(hba, free_slot);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_device_reset - reset device and abort all the pending commands
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host;
+	struct ufs_hba *hba;
+	unsigned int tag;
+	u32 pos;
+	int err;
+
+	host = cmd->device->host;
+	hba = shost_priv(host);
+	tag = cmd->request->tag;
+
+	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
+	if (err)
+		goto out;
+
+	for (pos = 0; pos < hba->nutrs; pos++) {
+		if (test_bit(pos, &hba->outstanding_reqs) &&
+		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
+
+			/* clear the respective UTRLCLR register bit */
+			ufshcd_utrl_clear(hba, pos);
+
+			clear_bit(pos, &hba->outstanding_reqs);
+
+			if (hba->lrb[pos].cmd) {
+				scsi_dma_unmap(hba->lrb[pos].cmd);
+				hba->lrb[pos].cmd->result =
+						DID_ABORT << 16;
+				hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
+				hba->lrb[pos].cmd = NULL;
+			}
+		}
+	} /* end of for */
+out:
+	return err;
+}
+
+/**
+ * ufshcd_host_reset - Main reset function registered with scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_host_reset(struct scsi_cmnd *cmd)
+{
+	struct ufs_hba *hba;
+
+	hba = shost_priv(cmd->device->host);
+
+	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+		return SUCCESS;
+
+	return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host;
+	struct ufs_hba *hba;
+	unsigned long flags;
+	unsigned int tag;
+	int err;
+
+	host = cmd->device->host;
+	hba = shost_priv(host);
+	tag = cmd->request->tag;
+
+	spin_lock_irqsave(host->host_lock, flags);
+
+	/* check if command is still pending */
+	if (!(test_bit(tag, &hba->outstanding_reqs))) {
+		err = FAILED;
+		spin_unlock_irqrestore(host->host_lock, flags);
+		goto out;
+	}
+	spin_unlock_irqrestore(host->host_lock, flags);
+
+	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
+	if (err)
+		goto out;
+
+	scsi_dma_unmap(cmd);
+
+	spin_lock_irqsave(host->host_lock, flags);
+
+	/* clear the respective UTRLCLR register bit */
+	ufshcd_utrl_clear(hba, tag);
+
+	__clear_bit(tag, &hba->outstanding_reqs);
+	hba->lrb[tag].cmd = NULL;
+	spin_unlock_irqrestore(host->host_lock, flags);
+out:
+	return err;
+}
+
+static struct scsi_host_template ufshcd_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= UFSHCD,
+	.proc_name		= UFSHCD,
+	.queuecommand		= ufshcd_queuecommand,
+	.slave_alloc		= ufshcd_slave_alloc,
+	.slave_destroy		= ufshcd_slave_destroy,
+	.eh_abort_handler	= ufshcd_abort,
+	.eh_device_reset_handler = ufshcd_device_reset,
+	.eh_host_reset_handler	= ufshcd_host_reset,
+	.this_id		= -1,
+	.sg_tablesize		= SG_ALL,
+	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
+	.can_queue		= UFSHCD_CAN_QUEUE,
+};
+
+/**
+ * ufshcd_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_shutdown(struct pci_dev *pdev)
+{
+	ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	/*
+	 * TODO:
+	 * 1. Block SCSI requests from SCSI midlayer
+	 * 2. Change the internal driver state to non operational
+	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
+	 * 4. Wait until outstanding commands are completed
+	 * 5. Set HCE to zero to send the UFS host controller to reset state
+	 */
+
+	return -ENOSYS;
+}
+
+/**
+ * ufshcd_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_resume(struct pci_dev *pdev)
+{
+	/*
+	 * TODO:
+	 * 1. Set HCE to 1, to start the UFS host controller
+	 * initialization process
+	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
+	 * 3. Change the internal driver state to operational
+	 * 4. Unblock SCSI requests from SCSI midlayer
+	 */
+
+	return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_hba_free - free allocated memory for
+ *			host memory space data structures
+ * @hba: per adapter instance
+ */
+static void ufshcd_hba_free(struct ufs_hba *hba)
+{
+	iounmap(hba->mmio_base);
+	ufshcd_free_hba_memory(hba);
+	pci_release_regions(hba->pdev);
+}
+
+/**
+ * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ *		data structure memory
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_remove(struct pci_dev *pdev)
+{
+	struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+	/* disable interrupts */
+	ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
+	free_irq(pdev->irq, hba);
+
+	ufshcd_hba_stop(hba);
+	ufshcd_hba_free(hba);
+
+	scsi_remove_host(hba->host);
+	scsi_host_put(hba->host);
+	pci_set_drvdata(pdev, NULL);
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ *			 addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+	int err;
+	u64 dma_mask;
+
+	/*
+	 * If controller supports 64 bit addressing mode, then set the DMA
+	 * mask to 64-bit, else set the DMA mask to 32-bit
+	 */
+	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
+		dma_mask = DMA_BIT_MASK(64);
+	else
+		dma_mask = DMA_BIT_MASK(32);
+
+	err = pci_set_dma_mask(hba->pdev, dma_mask);
+	if (err)
+		return err;
+
+	err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
+
+	return err;
+}
+
+/**
+ * ufshcd_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int __devinit
+ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct Scsi_Host *host;
+	struct ufs_hba *hba;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed\n");
+		goto out_error;
+	}
+
+	pci_set_master(pdev);
+
+	host = scsi_host_alloc(&ufshcd_driver_template,
+				sizeof(struct ufs_hba));
+	if (!host) {
+		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+		err = -ENOMEM;
+		goto out_disable;
+	}
+	hba = shost_priv(host);
+
+	err = pci_request_regions(pdev, UFSHCD);
+	if (err < 0) {
+		dev_err(&pdev->dev, "request regions failed\n");
+		goto out_disable;
+	}
+
+	hba->mmio_base = pci_ioremap_bar(pdev, 0);
+	if (!hba->mmio_base) {
+		dev_err(&pdev->dev, "memory map failed\n");
+		err = -ENOMEM;
+		goto out_release_regions;
+	}
+
+	hba->host = host;
+	hba->pdev = pdev;
+
+	/* Read capabilities registers */
+	ufshcd_hba_capabilities(hba);
+
+	/* Get UFS version supported by the controller */
+	hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+	err = ufshcd_set_dma_mask(hba);
+	if (err) {
+		dev_err(&pdev->dev, "set dma mask failed\n");
+		goto out_iounmap;
+	}
+
+	/* Allocate memory for host memory space */
+	err = ufshcd_memory_alloc(hba);
+	if (err) {
+		dev_err(&pdev->dev, "Memory allocation failed\n");
+		goto out_iounmap;
+	}
+
+	/* Configure LRB */
+	ufshcd_host_memory_configure(hba);
+
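+	/* Advertise the controller queue depth to the SCSI midlayer */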
+	host->can_queue = hba->nutrs;
+	host->cmd_per_lun = hba->nutrs;
+	host->max_id = UFSHCD_MAX_ID;
+	host->max_lun = UFSHCD_MAX_LUNS;
+	host->max_channel = UFSHCD_MAX_CHANNEL;
+	host->unique_id = host->host_no;
+	host->max_cmd_len = MAX_CDB_SIZE;
+
+	/* Initialize wait queue for task management */
+	init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+
+	/* Initialize work queues */
+	INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
+	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+
+	/* IRQ registration */
+	err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+	if (err) {
+		dev_err(&pdev->dev, "request irq failed\n");
+		goto out_lrb_free;
+	}
+
+	/* Enable SCSI tag mapping */
+	err = scsi_init_shared_tag_map(host, host->can_queue);
+	if (err) {
+		dev_err(&pdev->dev, "init shared queue failed\n");
+		goto out_free_irq;
+	}
+
+	pci_set_drvdata(pdev, hba);
+
+	err = scsi_add_host(host, &pdev->dev);
+	if (err) {
+		dev_err(&pdev->dev, "scsi_add_host failed\n");
+		goto out_free_irq;
+	}
+
+	/* Initialization routine */
+	err = ufshcd_initialize_hba(hba);
+	if (err) {
+		dev_err(&pdev->dev, "Initialization failed\n");
+		goto out_free_irq;
+	}
+
+	return 0;
+
+out_free_irq:
+	free_irq(pdev->irq, hba);
+out_lrb_free:
+	ufshcd_free_hba_memory(hba);
+out_iounmap:
+	iounmap(hba->mmio_base);
+out_release_regions:
+	pci_release_regions(pdev);
+out_disable:
+	scsi_host_put(host);
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+out_error:
+	return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ }	/* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+	.name = UFSHCD,
+	.id_table = ufshcd_pci_tbl,
+	.probe = ufshcd_probe,
+	.remove = __devexit_p(ufshcd_remove),
+	.shutdown = ufshcd_shutdown,
+#ifdef CONFIG_PM
+	.suspend = ufshcd_suspend,
+	.resume = ufshcd_resume,
+#endif
+};
+
+/**
+ * ufshcd_init - Driver registration routine
+ */
+static int __init ufshcd_init(void)
+{
+	return pci_register_driver(&ufshcd_pci_driver);
+}
+module_init(ufshcd_init);
+
+/**
+ * ufshcd_exit - Driver exit clean-up routine
+ */
+static void __exit ufshcd_exit(void)
+{
+	pci_unregister_driver(&ufshcd_pci_driver);
+}
+module_exit(ufshcd_exit);
+
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
+	      "Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
new file mode 100644
index 0000000..6e3510f
--- /dev/null
+++ b/drivers/scsi/ufs/ufshci.h
@@ -0,0 +1,376 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshci.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#ifndef _UFSHCI_H
+#define _UFSHCI_H
+
+enum {
+	TASK_REQ_UPIU_SIZE_DWORDS	= 8,
+	TASK_RSP_UPIU_SIZE_DWORDS	= 8,
+	ALIGNED_UPIU_SIZE		= 128,
+};
+
+/* UFSHCI Registers */
+enum {
+	REG_CONTROLLER_CAPABILITIES		= 0x00,
+	REG_UFS_VERSION				= 0x08,
+	REG_CONTROLLER_DEV_ID			= 0x10,
+	REG_CONTROLLER_PROD_ID			= 0x14,
+	REG_INTERRUPT_STATUS			= 0x20,
+	REG_INTERRUPT_ENABLE			= 0x24,
+	REG_CONTROLLER_STATUS			= 0x30,
+	REG_CONTROLLER_ENABLE			= 0x34,
+	REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER	= 0x38,
+	REG_UIC_ERROR_CODE_DATA_LINK_LAYER	= 0x3C,
+	REG_UIC_ERROR_CODE_NETWORK_LAYER	= 0x40,
+	REG_UIC_ERROR_CODE_TRANSPORT_LAYER	= 0x44,
+	REG_UIC_ERROR_CODE_DME			= 0x48,
+	REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL	= 0x4C,
+	REG_UTP_TRANSFER_REQ_LIST_BASE_L	= 0x50,
+	REG_UTP_TRANSFER_REQ_LIST_BASE_H	= 0x54,
+	REG_UTP_TRANSFER_REQ_DOOR_BELL		= 0x58,
+	REG_UTP_TRANSFER_REQ_LIST_CLEAR		= 0x5C,
+	REG_UTP_TRANSFER_REQ_LIST_RUN_STOP	= 0x60,
+	REG_UTP_TASK_REQ_LIST_BASE_L		= 0x70,
+	REG_UTP_TASK_REQ_LIST_BASE_H		= 0x74,
+	REG_UTP_TASK_REQ_DOOR_BELL		= 0x78,
+	REG_UTP_TASK_REQ_LIST_CLEAR		= 0x7C,
+	REG_UTP_TASK_REQ_LIST_RUN_STOP		= 0x80,
+	REG_UIC_COMMAND				= 0x90,
+	REG_UIC_COMMAND_ARG_1			= 0x94,
+	REG_UIC_COMMAND_ARG_2			= 0x98,
+	REG_UIC_COMMAND_ARG_3			= 0x9C,
+};
+
+/* Controller capability masks */
+enum {
+	MASK_TRANSFER_REQUESTS_SLOTS		= 0x0000001F,
+	MASK_TASK_MANAGEMENT_REQUEST_SLOTS	= 0x00070000,
+	MASK_64_ADDRESSING_SUPPORT		= 0x01000000,
+	MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT	= 0x02000000,
+	MASK_UIC_DME_TEST_MODE_SUPPORT		= 0x04000000,
+};
+
+/* UFS Version 08h */
+#define MINOR_VERSION_NUM_MASK		UFS_MASK(0xFFFF, 0)
+#define MAJOR_VERSION_NUM_MASK		UFS_MASK(0xFFFF, 16)
+
+/* Controller UFSHCI version */
+enum {
+	UFSHCI_VERSION_10 = 0x00010000,
+	UFSHCI_VERSION_11 = 0x00010100,
+};
+
+/*
+ * HCDDID - Host Controller Identification Descriptor
+ *	  - Device ID and Device Class 10h
+ */
+#define DEVICE_CLASS	UFS_MASK(0xFFFF, 0)
+#define DEVICE_ID	UFS_MASK(0xFF, 24)
+
+/*
+ * HCPMID - Host Controller Identification Descriptor
+ *	  - Product/Manufacturer ID  14h
+ */
+#define MANUFACTURE_ID_MASK	UFS_MASK(0xFFFF, 0)
+#define PRODUCT_ID_MASK		UFS_MASK(0xFFFF, 16)
+
+#define UFS_BIT(x)	(1L << (x))
+
+#define UTP_TRANSFER_REQ_COMPL			UFS_BIT(0)
+#define UIC_DME_END_PT_RESET			UFS_BIT(1)
+#define UIC_ERROR				UFS_BIT(2)
+#define UIC_TEST_MODE				UFS_BIT(3)
+#define UIC_POWER_MODE				UFS_BIT(4)
+#define UIC_HIBERNATE_EXIT			UFS_BIT(5)
+#define UIC_HIBERNATE_ENTER			UFS_BIT(6)
+#define UIC_LINK_LOST				UFS_BIT(7)
+#define UIC_LINK_STARTUP			UFS_BIT(8)
+#define UTP_TASK_REQ_COMPL			UFS_BIT(9)
+#define UIC_COMMAND_COMPL			UFS_BIT(10)
+#define DEVICE_FATAL_ERROR			UFS_BIT(11)
+#define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
+#define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
+
+#define UFSHCD_ERROR_MASK	(UIC_ERROR |\
+				DEVICE_FATAL_ERROR |\
+				CONTROLLER_FATAL_ERROR |\
+				SYSTEM_BUS_FATAL_ERROR)
+
+#define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
+				CONTROLLER_FATAL_ERROR |\
+				SYSTEM_BUS_FATAL_ERROR)
+
+/* HCS - Host Controller Status 30h */
+#define DEVICE_PRESENT				UFS_BIT(0)
+#define UTP_TRANSFER_REQ_LIST_READY		UFS_BIT(1)
+#define UTP_TASK_REQ_LIST_READY			UFS_BIT(2)
+#define UIC_COMMAND_READY			UFS_BIT(3)
+#define HOST_ERROR_INDICATOR			UFS_BIT(4)
+#define DEVICE_ERROR_INDICATOR			UFS_BIT(5)
+#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK	UFS_MASK(0x7, 8)
+
+/* HCE - Host Controller Enable 34h */
+#define CONTROLLER_ENABLE	UFS_BIT(0)
+#define CONTROLLER_DISABLE	0x0
+
+/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
+#define UIC_PHY_ADAPTER_LAYER_ERROR			UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK		0x1F
+
+/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
+#define UIC_DATA_LINK_LAYER_ERROR		UFS_BIT(31)
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK	0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT	0x2000
+
+/* UECN - Host UIC Error Code Network Layer 40h */
+#define UIC_NETWORK_LAYER_ERROR			UFS_BIT(31)
+#define UIC_NETWORK_LAYER_ERROR_CODE_MASK	0x7
+
+/* UECT - Host UIC Error Code Transport Layer 44h */
+#define UIC_TRANSPORT_LAYER_ERROR		UFS_BIT(31)
+#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK	0x7F
+
+/* UECDME - Host UIC Error Code DME 48h */
+#define UIC_DME_ERROR			UFS_BIT(31)
+#define UIC_DME_ERROR_CODE_MASK		0x1
+
+#define INT_AGGR_TIMEOUT_VAL_MASK		0xFF
+#define INT_AGGR_COUNTER_THRESHOLD_MASK		UFS_MASK(0x1F, 8)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET	UFS_BIT(16)
+#define INT_AGGR_STATUS_BIT			UFS_BIT(20)
+#define INT_AGGR_PARAM_WRITE			UFS_BIT(24)
+#define INT_AGGR_ENABLE				UFS_BIT(31)
+
+/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT	UFS_BIT(0)
+
+/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT		UFS_BIT(0)
+
+/* UICCMD - UIC Command */
+#define COMMAND_OPCODE_MASK		0xFF
+#define GEN_SELECTOR_INDEX_MASK		0xFFFF
+
+#define MIB_ATTRIBUTE_MASK		UFS_MASK(0xFFFF, 16)
+#define RESET_LEVEL			0xFF
+
+#define ATTR_SET_TYPE_MASK		UFS_MASK(0xFF, 16)
+#define CONFIG_RESULT_CODE_MASK		0xFF
+#define GENERIC_ERROR_CODE_MASK		0xFF
+
+/* UIC Commands */
+enum {
+	UIC_CMD_DME_GET			= 0x01,
+	UIC_CMD_DME_SET			= 0x02,
+	UIC_CMD_DME_PEER_GET		= 0x03,
+	UIC_CMD_DME_PEER_SET		= 0x04,
+	UIC_CMD_DME_POWERON		= 0x10,
+	UIC_CMD_DME_POWEROFF		= 0x11,
+	UIC_CMD_DME_ENABLE		= 0x12,
+	UIC_CMD_DME_RESET		= 0x14,
+	UIC_CMD_DME_END_PT_RST		= 0x15,
+	UIC_CMD_DME_LINK_STARTUP	= 0x16,
+	UIC_CMD_DME_HIBER_ENTER		= 0x17,
+	UIC_CMD_DME_HIBER_EXIT		= 0x18,
+	UIC_CMD_DME_TEST_MODE		= 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+	UIC_CMD_RESULT_SUCCESS			= 0x00,
+	UIC_CMD_RESULT_INVALID_ATTR		= 0x01,
+	UIC_CMD_RESULT_FAILURE			= 0x01,
+	UIC_CMD_RESULT_INVALID_ATTR_VALUE	= 0x02,
+	UIC_CMD_RESULT_READ_ONLY_ATTR		= 0x03,
+	UIC_CMD_RESULT_WRITE_ONLY_ATTR		= 0x04,
+	UIC_CMD_RESULT_BAD_INDEX		= 0x05,
+	UIC_CMD_RESULT_LOCKED_ATTR		= 0x06,
+	UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX	= 0x07,
+	UIC_CMD_RESULT_PEER_COMM_FAILURE	= 0x08,
+	UIC_CMD_RESULT_BUSY			= 0x09,
+	UIC_CMD_RESULT_DME_FAILURE		= 0x0A,
+};
+
+#define MASK_UIC_COMMAND_RESULT			0xFF
+
+#define INT_AGGR_COUNTER_THRESHOLD_VALUE	(0x1F << 8)
+#define INT_AGGR_TIMEOUT_VALUE			(0x02)
+
+/* Interrupt disable masks */
+enum {
+	/* Interrupt disable mask for UFSHCI v1.0 */
+	INTERRUPT_DISABLE_MASK_10	= 0xFFFF,
+
+	/* Interrupt disable mask for UFSHCI v1.1 */
+	INTERRUPT_DISABLE_MASK_11	= 0x0,
+};
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+	UTP_CMD_TYPE_SCSI		= 0x0,
+	UTP_CMD_TYPE_UFS		= 0x1,
+	UTP_CMD_TYPE_DEV_MANAGE		= 0x2,
+};
+
+enum {
+	UTP_SCSI_COMMAND		= 0x00000000,
+	UTP_NATIVE_UFS_COMMAND		= 0x10000000,
+	UTP_DEVICE_MANAGEMENT_FUNCTION	= 0x20000000,
+	UTP_REQ_DESC_INT_CMD		= 0x01000000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+	UTP_NO_DATA_TRANSFER	= 0x00000000,
+	UTP_HOST_TO_DEVICE	= 0x02000000,
+	UTP_DEVICE_TO_HOST	= 0x04000000,
+};
+
+/* Overall command status values */
+enum {
+	OCS_SUCCESS			= 0x0,
+	OCS_INVALID_CMD_TABLE_ATTR	= 0x1,
+	OCS_INVALID_PRDT_ATTR		= 0x2,
+	OCS_MISMATCH_DATA_BUF_SIZE	= 0x3,
+	OCS_MISMATCH_RESP_UPIU_SIZE	= 0x4,
+	OCS_PEER_COMM_FAILURE		= 0x5,
+	OCS_ABORTED			= 0x6,
+	OCS_FATAL_ERROR			= 0x7,
+	OCS_INVALID_COMMAND_STATUS	= 0x0F,
+	MASK_OCS			= 0x0F,
+};
+
+/**
+ * struct ufshcd_sg_entry - UFSHCI PRD Entry
+ * @base_addr: Lower 32bit physical address DW-0
+ * @upper_addr: Upper 32bit physical address DW-1
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+struct ufshcd_sg_entry {
+	u32    base_addr;
+	u32    upper_addr;
+	u32    reserved;
+	u32    size;
+};
+
+/**
+ * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * @command_upiu: Command UPIU Frame address
+ * @response_upiu: Response UPIU Frame address
+ * @prd_table: Physical Region Descriptor
+ */
+struct utp_transfer_cmd_desc {
+	u8 command_upiu[ALIGNED_UPIU_SIZE];
+	u8 response_upiu[ALIGNED_UPIU_SIZE];
+	struct ufshcd_sg_entry    prd_table[SG_ALL];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ * @dword_0: Descriptor Header DW0
+ * @dword_1: Descriptor Header DW1
+ * @dword_2: Descriptor Header DW2
+ * @dword_3: Descriptor Header DW3
+ */
+struct request_desc_header {
+	u32 dword_0;
+	u32 dword_1;
+	u32 dword_2;
+	u32 dword_3;
+};
+
+/**
+ * struct utp_transfer_req_desc - UTRD structure
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+
+	/* DW 0-3 */
+	struct request_desc_header header;
+
+	/* DW 4-5 */
+	u32  command_desc_base_addr_lo;
+	u32  command_desc_base_addr_hi;
+
+	/* DW 6 */
+	u16  response_upiu_length;
+	u16  response_upiu_offset;
+
+	/* DW 7 */
+	u16  prd_table_length;
+	u16  prd_table_offset;
+};
+
+/**
+ * struct utp_task_req_desc - UTMRD structure
+ * @header: UTMRD header DW-0 to DW-3
+ * @task_req_upiu: Task request UPIU DW-4 to DW-11
+ * @task_rsp_upiu: Task response UPIU DW-12 to DW-19
+ */
+struct utp_task_req_desc {
+
+	/* DW 0-3 */
+	struct request_desc_header header;
+
+	/* DW 4-11 */
+	u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+
+	/* DW 12-19 */
+	u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+};
+
+#endif /* End of Header */
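
The UFS_BIT()/UFS_MASK() helpers used throughout the register definitions above are declared earlier in this header; assuming UFS_BIT(x) expands to (1 << (x)) and UFS_MASK(x, sft) to ((x) << (sft)), a minimal sketch of pulling the overall command status out of a completed transfer request descriptor could look like the following (ufs_sketch_get_ocs is a hypothetical helper, not part of the driver):

	/*
	 * Hypothetical sketch, not part of the patch: OCS is reported in the
	 * low bits of DW-2 of the UTP transfer request descriptor header.
	 * Endianness conversion is omitted for brevity.
	 */
	static inline int ufs_sketch_get_ocs(struct utp_transfer_req_desc *d)
	{
		return d->header.dword_2 & MASK_OCS;
	}
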
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 7264116..4411d42 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
  *
  */
 
@@ -1178,11 +1178,67 @@
 	return 0;
 }
 
+/*
+ * Query the device, fetch the config info and return the
+ * maximum number of targets on the adapter. In case of
+ * failure for any reason, return the default, i.e. 16.
+ */
+static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
+{
+	struct PVSCSICmdDescConfigCmd cmd;
+	struct PVSCSIConfigPageHeader *header;
+	struct device *dev;
+	dma_addr_t configPagePA;
+	void *config_page;
+	u32 numPhys = 16;
+
+	dev = pvscsi_dev(adapter);
+	config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+					   &configPagePA);
+	if (!config_page) {
+		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
+		goto exit;
+	}
+	BUG_ON(configPagePA & ~PAGE_MASK);
+
+	/* Fetch config info from the device. */
+	cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
+	cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
+	cmd.cmpAddr = configPagePA;
+	cmd._pad = 0;
+
+	/*
+	 * Mark the completion page header with error values. If the device
+	 * completes the command successfully, it sets the status values to
+	 * indicate success.
+	 */
+	header = config_page;
+	memset(header, 0, sizeof *header);
+	header->hostStatus = BTSTAT_INVPARAM;
+	header->scsiStatus = SDSTAT_CHECK;
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
+
+	if (header->hostStatus == BTSTAT_SUCCESS &&
+	    header->scsiStatus == SDSTAT_GOOD) {
+		struct PVSCSIConfigPageController *config;
+
+		config = config_page;
+		numPhys = config->numPhys;
+	} else
+		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
+			 header->hostStatus, header->scsiStatus);
+	pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+exit:
+	return numPhys;
+}
+
 static int __devinit pvscsi_probe(struct pci_dev *pdev,
 				  const struct pci_device_id *id)
 {
 	struct pvscsi_adapter *adapter;
 	struct Scsi_Host *host;
+	struct device *dev;
 	unsigned int i;
 	unsigned long flags = 0;
 	int error;
@@ -1272,6 +1328,13 @@
 	}
 
 	/*
+	 * Ask the device for max number of targets.
+	 */
+	host->max_id = pvscsi_get_max_targets(adapter);
+	dev = pvscsi_dev(adapter);
+	dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
+
+	/*
 	 * From this point on we should reset the adapter if anything goes
 	 * wrong.
 	 */
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 62e36e7..3546e86 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
  *
  */
 
@@ -26,7 +26,7 @@
 
 #include <linux/types.h>
 
-#define PVSCSI_DRIVER_VERSION_STRING   "1.0.1.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING   "1.0.2.0-k"
 
 #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
 
@@ -39,28 +39,45 @@
  * host adapter status/error codes
  */
 enum HostBusAdapterStatus {
-   BTSTAT_SUCCESS       = 0x00,  /* CCB complete normally with no errors */
-   BTSTAT_LINKED_COMMAND_COMPLETED           = 0x0a,
-   BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
-   BTSTAT_DATA_UNDERRUN = 0x0c,
-   BTSTAT_SELTIMEO      = 0x11,  /* SCSI selection timeout */
-   BTSTAT_DATARUN       = 0x12,  /* data overrun/underrun */
-   BTSTAT_BUSFREE       = 0x13,  /* unexpected bus free */
-   BTSTAT_INVPHASE      = 0x14,  /* invalid bus phase or sequence requested by target */
-   BTSTAT_LUNMISMATCH   = 0x17,  /* linked CCB has different LUN from first CCB */
-   BTSTAT_SENSFAILED    = 0x1b,  /* auto request sense failed */
-   BTSTAT_TAGREJECT     = 0x1c,  /* SCSI II tagged queueing message rejected by target */
-   BTSTAT_BADMSG        = 0x1d,  /* unsupported message received by the host adapter */
-   BTSTAT_HAHARDWARE    = 0x20,  /* host adapter hardware failed */
-   BTSTAT_NORESPONSE    = 0x21,  /* target did not respond to SCSI ATN, sent a SCSI RST */
-   BTSTAT_SENTRST       = 0x22,  /* host adapter asserted a SCSI RST */
-   BTSTAT_RECVRST       = 0x23,  /* other SCSI devices asserted a SCSI RST */
-   BTSTAT_DISCONNECT    = 0x24,  /* target device reconnected improperly (w/o tag) */
-   BTSTAT_BUSRESET      = 0x25,  /* host adapter issued BUS device reset */
-   BTSTAT_ABORTQUEUE    = 0x26,  /* abort queue generated */
-   BTSTAT_HASOFTWARE    = 0x27,  /* host adapter software error */
-   BTSTAT_HATIMEOUT     = 0x30,  /* host adapter hardware timeout error */
-   BTSTAT_SCSIPARITY    = 0x34,  /* SCSI parity error detected */
+	BTSTAT_SUCCESS       = 0x00,  /* CCB complete normally with no errors */
+	BTSTAT_LINKED_COMMAND_COMPLETED           = 0x0a,
+	BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+	BTSTAT_DATA_UNDERRUN = 0x0c,
+	BTSTAT_SELTIMEO      = 0x11,  /* SCSI selection timeout */
+	BTSTAT_DATARUN       = 0x12,  /* data overrun/underrun */
+	BTSTAT_BUSFREE       = 0x13,  /* unexpected bus free */
+	BTSTAT_INVPHASE      = 0x14,  /* invalid bus phase or sequence
+				       * requested by target */
+	BTSTAT_LUNMISMATCH   = 0x17,  /* linked CCB has different LUN from
+				       * first CCB */
+	BTSTAT_INVPARAM      = 0x1a,  /* invalid parameter in CCB or segment
+				       * list */
+	BTSTAT_SENSFAILED    = 0x1b,  /* auto request sense failed */
+	BTSTAT_TAGREJECT     = 0x1c,  /* SCSI II tagged queueing message
+				       * rejected by target */
+	BTSTAT_BADMSG        = 0x1d,  /* unsupported message received by the
+				       * host adapter */
+	BTSTAT_HAHARDWARE    = 0x20,  /* host adapter hardware failed */
+	BTSTAT_NORESPONSE    = 0x21,  /* target did not respond to SCSI ATN,
+				       * sent a SCSI RST */
+	BTSTAT_SENTRST       = 0x22,  /* host adapter asserted a SCSI RST */
+	BTSTAT_RECVRST       = 0x23,  /* other SCSI devices asserted a SCSI
+				       * RST */
+	BTSTAT_DISCONNECT    = 0x24,  /* target device reconnected improperly
+				       * (w/o tag) */
+	BTSTAT_BUSRESET      = 0x25,  /* host adapter issued BUS device reset */
+	BTSTAT_ABORTQUEUE    = 0x26,  /* abort queue generated */
+	BTSTAT_HASOFTWARE    = 0x27,  /* host adapter software error */
+	BTSTAT_HATIMEOUT     = 0x30,  /* host adapter hardware timeout error */
+	BTSTAT_SCSIPARITY    = 0x34,  /* SCSI parity error detected */
+};
+
+/*
+ * SCSI device status values.
+ */
+enum ScsiDeviceStatus {
+	SDSTAT_GOOD  = 0x00, /* No errors. */
+	SDSTAT_CHECK = 0x02, /* Check condition. */
 };
 
 /*
@@ -114,6 +131,29 @@
 } __packed;
 
 /*
+ * Command descriptor for PVSCSI_CMD_CONFIG --
+ */
+
+struct PVSCSICmdDescConfigCmd {
+	u64 cmpAddr;
+	u64 configPageAddress;
+	u32 configPageNum;
+	u32 _pad;
+} __packed;
+
+enum PVSCSIConfigPageType {
+	PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
+	PVSCSI_CONFIG_PAGE_PHY        = 0x1959,
+	PVSCSI_CONFIG_PAGE_DEVICE     = 0x195a,
+};
+
+enum PVSCSIConfigPageAddressType {
+	PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120,
+	PVSCSI_CONFIG_BUSTARGET_ADDRESS  = 0x2121,
+	PVSCSI_CONFIG_PHY_ADDRESS        = 0x2122,
+};
+
+/*
  * Command descriptor for PVSCSI_CMD_ABORT_CMD --
  *
  * - currently does not support specifying the LUN.
@@ -332,6 +372,27 @@
 	u32	_pad[2];
 } __packed;
 
+struct PVSCSIConfigPageHeader {
+	u32 pageNum;
+	u16 numDwords;
+	u16 hostStatus;
+	u16 scsiStatus;
+	u16 reserved[3];
+} __packed;
+
+struct PVSCSIConfigPageController {
+	struct PVSCSIConfigPageHeader header;
+	u64 nodeWWN; /* Device name as defined in the SAS spec. */
+	u16 manufacturer[64];
+	u16 serialNumber[64];
+	u16 opromVersion[32];
+	u16 hwVersion[32];
+	u16 firmwareVersion[32];
+	u32 numPhys;
+	u8  useConsecutivePhyWWNs;
+	u8  reserved[3];
+} __packed;
+
 /*
  * Interrupt status / IRQ bits.
  */
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 082458d..d1a495f 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -63,12 +63,6 @@
 };
 
 #ifdef CONFIG_DEBUG_FS
-static int spi_show_regs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 #define SPI_REGS_BUFSIZE	1024
 static ssize_t  spi_show_regs(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
@@ -128,7 +122,7 @@
 
 static const struct file_operations mrst_spi_regs_ops = {
 	.owner		= THIS_MODULE,
-	.open		= spi_show_regs_open,
+	.open		= simple_open,
 	.read		= spi_show_regs,
 	.llseek		= default_llseek,
 };
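
This hunk, like the later ones for serial/mfd.c, serial/pch_uart.c, usb/core/inode.c and ehci-dbg.c, drops a trivial per-driver debugfs open() helper in favour of the generic simple_open() from fs/libfs.c, which does essentially the same thing (paraphrased here for reference):

	/* fs/libfs.c: stash the creator's private pointer so the read()/write()
	 * handlers can reach their device state via file->private_data. */
	int simple_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}
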
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 052b43e..b91e4bc 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -55,7 +55,6 @@
 };
 static int lowmem_minfree_size = 4;
 
-static struct task_struct *lowmem_deathpending;
 static unsigned long lowmem_deathpending_timeout;
 
 #define lowmem_print(level, x...)			\
@@ -64,24 +63,6 @@
 			printk(x);			\
 	} while (0)
 
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data);
-
-static struct notifier_block task_nb = {
-	.notifier_call	= task_notify_func,
-};
-
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data)
-{
-	struct task_struct *task = data;
-
-	if (task == lowmem_deathpending)
-		lowmem_deathpending = NULL;
-
-	return NOTIFY_OK;
-}
-
 static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 {
 	struct task_struct *tsk;
@@ -97,19 +78,6 @@
 	int other_file = global_page_state(NR_FILE_PAGES) -
 						global_page_state(NR_SHMEM);
 
-	/*
-	 * If we already have a death outstanding, then
-	 * bail out right away; indicating to vmscan
-	 * that we have nothing further to offer on
-	 * this pass.
-	 *
-	 * Note: Currently you need CONFIG_PROFILING
-	 * for this to work correctly.
-	 */
-	if (lowmem_deathpending &&
-	    time_before_eq(jiffies, lowmem_deathpending_timeout))
-		return 0;
-
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
@@ -148,6 +116,12 @@
 		if (!p)
 			continue;
 
+		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+			task_unlock(p);
+			rcu_read_unlock();
+			return 0;
+		}
 		oom_score_adj = p->signal->oom_score_adj;
 		if (oom_score_adj < min_score_adj) {
 			task_unlock(p);
@@ -174,15 +148,9 @@
 		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
 			     selected->pid, selected->comm,
 			     selected_oom_score_adj, selected_tasksize);
-		/*
-		 * If CONFIG_PROFILING is off, then we don't want to stall
-		 * the killer by setting lowmem_deathpending.
-		 */
-#ifdef CONFIG_PROFILING
-		lowmem_deathpending = selected;
 		lowmem_deathpending_timeout = jiffies + HZ;
-#endif
 		send_sig(SIGKILL, selected, 0);
+		set_tsk_thread_flag(selected, TIF_MEMDIE);
 		rem -= selected_tasksize;
 	}
 	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
@@ -198,7 +166,6 @@
 
 static int __init lowmem_init(void)
 {
-	task_handoff_register(&task_nb);
 	register_shrinker(&lowmem_shrinker);
 	return 0;
 }
@@ -206,7 +173,6 @@
 static void __exit lowmem_exit(void)
 {
 	unregister_shrinker(&lowmem_shrinker);
-	task_handoff_unregister(&task_nb);
 }
 
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/staging/asus_oled/README b/drivers/staging/asus_oled/README
index 0d82a6d..2d72123 100644
--- a/drivers/staging/asus_oled/README
+++ b/drivers/staging/asus_oled/README
@@ -52,7 +52,7 @@
 
    There is only one option: start_off.
    You can use it by: 'modprobe asus_oled start_off=1', or by adding this
-   line to /etc/modprobe.conf:
+   line to /etc/modprobe.d/asus_oled.conf:
    options asus_oled start_off=1
 
    With this option provided, asus_oled driver will switch off the display
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 8306579..c5eb3c3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -122,6 +122,7 @@
 	/* Local sense buffer */
 	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
 	u32 was_ddp_setup:1;		/* Set only if ddp is setup */
+	u32 aborted:1;			/* Set if aborted by reset or timeout */
 	struct scatterlist *sg;		/* Set only if DDP is setup */
 	u32 sg_cnt;			/* No. of item in scatterlist */
 };
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 62dec97..a375f25 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -121,6 +121,8 @@
 	struct fc_exch *ep;
 	size_t len;
 
+	if (cmd->aborted)
+		return 0;
 	ft_dump_cmd(cmd, __func__);
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
@@ -187,6 +189,8 @@
 
 	ft_dump_cmd(cmd, __func__);
 
+	if (cmd->aborted)
+		return 0;
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
 	fp = fc_frame_alloc(lport, sizeof(*txrdy));
@@ -252,10 +256,10 @@
 	struct ft_cmd *cmd = arg;
 	struct fc_frame_header *fh;
 
-	if (IS_ERR(fp)) {
+	if (unlikely(IS_ERR(fp))) {
 		/* XXX need to find cmd if queued */
 		cmd->seq = NULL;
-		transport_generic_free_cmd(&cmd->se_cmd, 0);
+		cmd->aborted = true;
 		return;
 	}
 
@@ -399,6 +403,8 @@
 	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
 	enum fcp_resp_rsp_codes code;
 
+	if (cmd->aborted)
+		return 0;
 	switch (tmr->response) {
 	case TMR_FUNCTION_COMPLETE:
 		code = FCP_TMF_CMPL;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index f357039..2948dc9 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -300,6 +300,7 @@
 {
 	struct ft_lport_acl *lacl;
 	struct ft_tpg *tpg;
+	struct workqueue_struct *wq;
 	unsigned long index;
 	int ret;
 
@@ -321,18 +322,20 @@
 	tpg->lport_acl = lacl;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
-	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
-				tpg, TRANSPORT_TPG_TYPE_NORMAL);
-	if (ret < 0) {
+	wq = alloc_workqueue("tcm_fc", 0, 1);
+	if (!wq) {
 		kfree(tpg);
 		return NULL;
 	}
 
-	tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
-	if (!tpg->workqueue) {
+	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
+				tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		destroy_workqueue(wq);
 		kfree(tpg);
 		return NULL;
 	}
+	tpg->workqueue = wq;
 
 	mutex_lock(&ft_lport_lock);
 	list_add_tail(&tpg->list, &lacl->tpg_list);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 2b693ee..dc7c0db 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -81,6 +81,8 @@
 	void *from;
 	void *to = NULL;
 
+	if (cmd->aborted)
+		return 0;
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7f71b2..514a691 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -18,3 +18,11 @@
 	depends on THERMAL
 	depends on HWMON=y || HWMON=THERMAL
 	default y
+
+config SPEAR_THERMAL
+	bool "SPEAr thermal sensor driver"
+	depends on THERMAL
+	depends on PLAT_SPEAR
+	help
+	  Enable this to plug the SPEAr thermal sensor driver into the Linux
+	  thermal framework
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 31108a0..a9fff0b 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_THERMAL)		+= thermal_sys.o
+obj-$(CONFIG_SPEAR_THERMAL)		+= spear_thermal.o
\ No newline at end of file
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
new file mode 100644
index 0000000..c2e32df
--- /dev/null
+++ b/drivers/thermal/spear_thermal.c
@@ -0,0 +1,206 @@
+/*
+ * SPEAr thermal driver.
+ *
+ * Copyright (C) 2011-2012 ST Microelectronics
+ * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/spear_thermal.h>
+#include <linux/thermal.h>
+
+#define MD_FACTOR	1000
+
+/* SPEAr Thermal Sensor Dev Structure */
+struct spear_thermal_dev {
+	/* pointer to base address of the thermal sensor */
+	void __iomem *thermal_base;
+	/* clk structure */
+	struct clk *clk;
+	/* thermal flags */
+	unsigned int flags;
+};
+
+static inline int thermal_get_temp(struct thermal_zone_device *thermal,
+				unsigned long *temp)
+{
+	struct spear_thermal_dev *stdev = thermal->devdata;
+
+	/*
+	 * Data is ready to be read 628 usec after the POWERDOWN signal
+	 * (PDN) is set to 1.
+	 */
+	*temp = (readl_relaxed(stdev->thermal_base) & 0x7F) * MD_FACTOR;
+	return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+	.get_temp = thermal_get_temp,
+};
+
+#ifdef CONFIG_PM
+static int spear_thermal_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+	struct spear_thermal_dev *stdev = spear_thermal->devdata;
+	unsigned int actual_mask = 0;
+
+	/* Disable SPEAr Thermal Sensor */
+	actual_mask = readl_relaxed(stdev->thermal_base);
+	writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
+
+	clk_disable(stdev->clk);
+	dev_info(dev, "Suspended.\n");
+
+	return 0;
+}
+
+static int spear_thermal_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+	struct spear_thermal_dev *stdev = spear_thermal->devdata;
+	unsigned int actual_mask = 0;
+	int ret = 0;
+
+	ret = clk_enable(stdev->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't enable clock\n");
+		return ret;
+	}
+
+	/* Enable SPEAr Thermal Sensor */
+	actual_mask = readl_relaxed(stdev->thermal_base);
+	writel_relaxed(actual_mask | stdev->flags, stdev->thermal_base);
+
+	dev_info(dev, "Resumed.\n");
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
+		spear_thermal_resume);
+
+static int spear_thermal_probe(struct platform_device *pdev)
+{
+	struct thermal_zone_device *spear_thermal = NULL;
+	struct spear_thermal_dev *stdev;
+	struct spear_thermal_pdata *pdata;
+	int ret = 0;
+	struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!stres) {
+		dev_err(&pdev->dev, "memory resource missing\n");
+		return -ENODEV;
+	}
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data is NULL\n");
+		return -EINVAL;
+	}
+
+	stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
+	if (!stdev) {
+		dev_err(&pdev->dev, "kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	/* Enable thermal sensor */
+	stdev->thermal_base = devm_ioremap(&pdev->dev, stres->start,
+			resource_size(stres));
+	if (!stdev->thermal_base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	stdev->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(stdev->clk)) {
+		dev_err(&pdev->dev, "Can't get clock\n");
+		return PTR_ERR(stdev->clk);
+	}
+
+	ret = clk_enable(stdev->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't enable clock\n");
+		goto put_clk;
+	}
+
+	stdev->flags = pdata->thermal_flags;
+	writel_relaxed(stdev->flags, stdev->thermal_base);
+
+	spear_thermal = thermal_zone_device_register("spear_thermal", 0,
+				stdev, &ops, 0, 0, 0, 0);
+	if (IS_ERR(spear_thermal)) {
+		dev_err(&pdev->dev, "thermal zone device is NULL\n");
+		ret = PTR_ERR(spear_thermal);
+		goto disable_clk;
+	}
+
+	platform_set_drvdata(pdev, spear_thermal);
+
+	dev_info(&spear_thermal->device, "Thermal Sensor Loaded at: 0x%p.\n",
+			stdev->thermal_base);
+
+	return 0;
+
+disable_clk:
+	clk_disable(stdev->clk);
+put_clk:
+	clk_put(stdev->clk);
+
+	return ret;
+}
+
+static int spear_thermal_exit(struct platform_device *pdev)
+{
+	unsigned int actual_mask = 0;
+	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+	struct spear_thermal_dev *stdev = spear_thermal->devdata;
+
+	thermal_zone_device_unregister(spear_thermal);
+	platform_set_drvdata(pdev, NULL);
+
+	/* Disable SPEAr Thermal Sensor */
+	actual_mask = readl_relaxed(stdev->thermal_base);
+	writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
+
+	clk_disable(stdev->clk);
+	clk_put(stdev->clk);
+
+	return 0;
+}
+
+static struct platform_driver spear_thermal_driver = {
+	.probe = spear_thermal_probe,
+	.remove = spear_thermal_exit,
+	.driver = {
+		.name = "spear_thermal",
+		.owner = THIS_MODULE,
+		.pm = &spear_thermal_pm_ops,
+	},
+};
+
+module_platform_driver(spear_thermal_driver);
+
+MODULE_AUTHOR("Vincenzo Frascino <vincenzo.frascino@st.com>");
+MODULE_DESCRIPTION("SPEAr thermal driver");
+MODULE_LICENSE("GPL");
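
On non-DT platforms the driver above expects one memory resource plus platform data carrying thermal_flags; a hypothetical board-file hookup (the register address and flag value below are assumptions for illustration, not taken from this patch) might look like:

	/* Hypothetical board code: values are placeholders, not from the patch. */
	static struct spear_thermal_pdata spear_thermal_pdata = {
		.thermal_flags	= 0x7,			/* assumed enable bits */
	};

	static struct resource spear_thermal_resources[] = {
		{
			.start	= 0xe07008c4,		/* assumed sensor register */
			.end	= 0xe07008c4 + 0x4 - 1,
			.flags	= IORESOURCE_MEM,
		},
	};

	static struct platform_device spear_thermal_device = {
		.name		= "spear_thermal",
		.id		= -1,
		.resource	= spear_thermal_resources,
		.num_resources	= ARRAY_SIZE(spear_thermal_resources),
		.dev = {
			.platform_data = &spear_thermal_pdata,
		},
	};
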
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 220ce7e..022bacb 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -23,6 +23,8 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -39,8 +41,6 @@
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
 MODULE_LICENSE("GPL");
 
-#define PREFIX "Thermal: "
-
 struct thermal_cooling_device_instance {
 	int id;
 	char name[THERMAL_NAME_LENGTH];
@@ -60,13 +60,11 @@
 static LIST_HEAD(thermal_cdev_list);
 static DEFINE_MUTEX(thermal_list_lock);
 
-static unsigned int thermal_event_seqnum;
-
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
 	int err;
 
-      again:
+again:
 	if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
 		return -ENOMEM;
 
@@ -152,9 +150,9 @@
 	if (!tz->ops->set_mode)
 		return -EPERM;
 
-	if (!strncmp(buf, "enabled", sizeof("enabled")))
+	if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
 		result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
-	else if (!strncmp(buf, "disabled", sizeof("disabled")))
+	else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
 		result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
 	else
 		result = -EINVAL;
@@ -283,8 +281,7 @@
 static DEVICE_ATTR(type, 0444, type_show, NULL);
 static DEVICE_ATTR(temp, 0444, temp_show, NULL);
 static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
-static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, \
-		   passive_store);
+static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
 
 static struct device_attribute trip_point_attrs[] = {
 	__ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
@@ -313,22 +310,6 @@
 	__ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL),
 };
 
-#define TRIP_POINT_ATTR_ADD(_dev, _index, result)     \
-do {    \
-	result = device_create_file(_dev,	\
-				&trip_point_attrs[_index * 2]);	\
-	if (result)	\
-		break;	\
-	result = device_create_file(_dev,	\
-			&trip_point_attrs[_index * 2 + 1]);	\
-} while (0)
-
-#define TRIP_POINT_ATTR_REMOVE(_dev, _index)	\
-do {	\
-	device_remove_file(_dev, &trip_point_attrs[_index * 2]);	\
-	device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]);	\
-} while (0)
-
 /* sys I/F for cooling device */
 #define to_cooling_device(_dev)	\
 	container_of(_dev, struct thermal_cooling_device, device)
@@ -835,15 +816,14 @@
 		return 0;
 
 	device_remove_file(&tz->device, &dev->attr);
-      remove_symbol_link:
+remove_symbol_link:
 	sysfs_remove_link(&tz->device.kobj, dev->name);
-      release_idr:
+release_idr:
 	release_idr(&tz->idr, &tz->lock, dev->id);
-      free_mem:
+free_mem:
 	kfree(dev);
 	return result;
 }
-
 EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
 
 /**
@@ -873,14 +853,13 @@
 
 	return -ENODEV;
 
-      unbind:
+unbind:
 	device_remove_file(&tz->device, &pos->attr);
 	sysfs_remove_link(&tz->device.kobj, pos->name);
 	release_idr(&tz->idr, &tz->lock, pos->id);
 	kfree(pos);
 	return 0;
 }
-
 EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
 
 static void thermal_release(struct device *dev)
@@ -888,7 +867,8 @@
 	struct thermal_zone_device *tz;
 	struct thermal_cooling_device *cdev;
 
-	if (!strncmp(dev_name(dev), "thermal_zone", sizeof "thermal_zone" - 1)) {
+	if (!strncmp(dev_name(dev), "thermal_zone",
+		     sizeof("thermal_zone") - 1)) {
 		tz = to_thermal_zone(dev);
 		kfree(tz);
 	} else {
@@ -908,8 +888,9 @@
  * @devdata:	device private data.
  * @ops:		standard thermal cooling devices callbacks.
  */
-struct thermal_cooling_device *thermal_cooling_device_register(
-     char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+				const struct thermal_cooling_device_ops *ops)
 {
 	struct thermal_cooling_device *cdev;
 	struct thermal_zone_device *pos;
@@ -974,12 +955,11 @@
 	if (!result)
 		return cdev;
 
-      unregister:
+unregister:
 	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
 	device_unregister(&cdev->device);
 	return ERR_PTR(result);
 }
-
 EXPORT_SYMBOL(thermal_cooling_device_register);
 
 /**
@@ -1024,7 +1004,6 @@
 	device_unregister(&cdev->device);
 	return;
 }
-
 EXPORT_SYMBOL(thermal_cooling_device_unregister);
 
 /**
@@ -1044,8 +1023,7 @@
 
 	if (tz->ops->get_temp(tz, &temp)) {
 		/* get_temp failed - retry it later */
-		printk(KERN_WARNING PREFIX "failed to read out thermal zone "
-		       "%d\n", tz->id);
+		pr_warn("failed to read out thermal zone %d\n", tz->id);
 		goto leave;
 	}
 
@@ -1060,9 +1038,8 @@
 					ret = tz->ops->notify(tz, count,
 							      trip_type);
 				if (!ret) {
-					printk(KERN_EMERG
-					       "Critical temperature reached (%ld C), shutting down.\n",
-					       temp/1000);
+					pr_emerg("Critical temperature reached (%ld C), shutting down\n",
+						 temp/1000);
 					orderly_poweroff(true);
 				}
 			}
@@ -1100,7 +1077,7 @@
 
 	tz->last_temperature = temp;
 
-      leave:
+leave:
 	if (tz->passive)
 		thermal_zone_device_set_polling(tz, tz->passive_delay);
 	else if (tz->polling_delay)
@@ -1199,7 +1176,12 @@
 	}
 
 	for (count = 0; count < trips; count++) {
-		TRIP_POINT_ATTR_ADD(&tz->device, count, result);
+		result = device_create_file(&tz->device,
+					    &trip_point_attrs[count * 2]);
+		if (result)
+			break;
+		result = device_create_file(&tz->device,
+					    &trip_point_attrs[count * 2 + 1]);
 		if (result)
 			goto unregister;
 		tz->ops->get_trip_type(tz, count, &trip_type);
@@ -1235,12 +1217,11 @@
 	if (!result)
 		return tz;
 
-      unregister:
+unregister:
 	release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
 	device_unregister(&tz->device);
 	return ERR_PTR(result);
 }
-
 EXPORT_SYMBOL(thermal_zone_device_register);
 
 /**
@@ -1279,9 +1260,12 @@
 	if (tz->ops->get_mode)
 		device_remove_file(&tz->device, &dev_attr_mode);
 
-	for (count = 0; count < tz->trips; count++)
-		TRIP_POINT_ATTR_REMOVE(&tz->device, count);
-
+	for (count = 0; count < tz->trips; count++) {
+		device_remove_file(&tz->device,
+				   &trip_point_attrs[count * 2]);
+		device_remove_file(&tz->device,
+				   &trip_point_attrs[count * 2 + 1]);
+	}
 	thermal_remove_hwmon_sysfs(tz);
 	release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
 	idr_destroy(&tz->idr);
@@ -1289,7 +1273,6 @@
 	device_unregister(&tz->device);
 	return;
 }
-
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
 #ifdef CONFIG_NET
@@ -1312,10 +1295,11 @@
 	void *msg_header;
 	int size;
 	int result;
+	static unsigned int thermal_event_seqnum;
 
 	/* allocate memory */
-	size = nla_total_size(sizeof(struct thermal_genl_event)) + \
-				nla_total_size(0);
+	size = nla_total_size(sizeof(struct thermal_genl_event)) +
+	       nla_total_size(0);
 
 	skb = genlmsg_new(size, GFP_ATOMIC);
 	if (!skb)
@@ -1331,8 +1315,8 @@
 	}
 
 	/* fill the data */
-	attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \
-			sizeof(struct thermal_genl_event));
+	attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
+			   sizeof(struct thermal_genl_event));
 
 	if (!attr) {
 		nlmsg_free(skb);
@@ -1359,7 +1343,7 @@
 
 	result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
 	if (result)
-		printk(KERN_INFO "failed to send netlink event:%d", result);
+		pr_info("failed to send netlink event:%d\n", result);
 
 	return result;
 }
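
The pr_fmt() definition added at the top of thermal_sys.c replaces the hand-rolled PREFIX string: every pr_warn()/pr_emerg()/pr_info() call in the file now picks up the module name automatically. For example:

	/* With the pr_fmt() above in effect ... */
	pr_warn("failed to read out thermal zone %d\n", tz->id);
	/* ... expands roughly to ... */
	printk(KERN_WARNING KBUILD_MODNAME ": " "failed to read out thermal zone %d\n", tz->id);
	/* ... and is logged as "thermal_sys: failed to read out thermal zone 0". */
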
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 794ecb4..e1235ac 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -102,7 +102,7 @@
  *	You can find the original tools for this direct from Multitech
  *		ftp://ftp.multitech.com/ISI-Cards/
  *
- *	Having installed the cards the module options (/etc/modprobe.conf)
+ *	Having installed the cards the module options (/etc/modprobe.d/)
  *
  *	options isicom   io=card1,card2,card3,card4 irq=card1,card2,card3,card4
  *
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index a9234ba8..c4b50af 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -127,11 +127,6 @@
 
 #define HSU_REGS_BUFSIZE	1024
 
-static int hsu_show_regs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
 
 static ssize_t port_show_regs(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
@@ -231,14 +226,14 @@
 
 static const struct file_operations port_regs_ops = {
 	.owner		= THIS_MODULE,
-	.open		= hsu_show_regs_open,
+	.open		= simple_open,
 	.read		= port_show_regs,
 	.llseek		= default_llseek,
 };
 
 static const struct file_operations dma_regs_ops = {
 	.owner		= THIS_MODULE,
-	.open		= hsu_show_regs_open,
+	.open		= simple_open,
 	.read		= dma_show_regs,
 	.llseek		= default_llseek,
 };
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index e825460..08b9962 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -304,11 +304,7 @@
 #ifdef CONFIG_DEBUG_FS
 
 #define PCH_REGS_BUFSIZE	1024
-static int pch_show_regs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
+
 
 static ssize_t port_show_regs(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
@@ -362,7 +358,7 @@
 
 static const struct file_operations port_regs_ops = {
 	.owner		= THIS_MODULE,
-	.open		= pch_show_regs_open,
+	.open		= simple_open,
 	.read		= port_show_regs,
 	.llseek		= default_llseek,
 };
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index bf461cf..3158e17 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -355,9 +355,6 @@
 		WARN(1, "Invalid register access\n");
 }
 
-#define sci_in(up, offset)		(up->serial_in(up, offset))
-#define sci_out(up, offset, value)	(up->serial_out(up, offset, value))
-
 static int sci_probe_regmap(struct plat_sci_port *cfg)
 {
 	switch (cfg->type) {
@@ -422,9 +419,9 @@
 	int c;
 
 	do {
-		status = sci_in(port, SCxSR);
+		status = serial_port_in(port, SCxSR);
 		if (status & SCxSR_ERRORS(port)) {
-			sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
+			serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
 			continue;
 		}
 		break;
@@ -433,11 +430,11 @@
 	if (!(status & SCxSR_RDxF(port)))
 		return NO_POLL_CHAR;
 
-	c = sci_in(port, SCxRDR);
+	c = serial_port_in(port, SCxRDR);
 
 	/* Dummy read */
-	sci_in(port, SCxSR);
-	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+	serial_port_in(port, SCxSR);
+	serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 
 	return c;
 }
@@ -448,11 +445,11 @@
 	unsigned short status;
 
 	do {
-		status = sci_in(port, SCxSR);
+		status = serial_port_in(port, SCxSR);
 	} while (!(status & SCxSR_TDxE(port)));
 
-	sci_out(port, SCxTDR, c);
-	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
+	serial_port_out(port, SCxTDR, c);
+	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
 }
 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
 
@@ -480,10 +477,10 @@
 	    ((!(cflag & CRTSCTS)))) {
 		unsigned short status;
 
-		status = sci_in(port, SCSPTR);
+		status = serial_port_in(port, SCSPTR);
 		status &= ~SCSPTR_CTSIO;
 		status |= SCSPTR_RTSIO;
-		sci_out(port, SCSPTR, status); /* Set RTS = 1 */
+		serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
 	}
 }
 
@@ -493,13 +490,13 @@
 
 	reg = sci_getreg(port, SCTFDR);
 	if (reg->size)
-		return sci_in(port, SCTFDR) & 0xff;
+		return serial_port_in(port, SCTFDR) & 0xff;
 
 	reg = sci_getreg(port, SCFDR);
 	if (reg->size)
-		return sci_in(port, SCFDR) >> 8;
+		return serial_port_in(port, SCFDR) >> 8;
 
-	return !(sci_in(port, SCxSR) & SCI_TDRE);
+	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
 }
 
 static int sci_txroom(struct uart_port *port)
@@ -513,13 +510,13 @@
 
 	reg = sci_getreg(port, SCRFDR);
 	if (reg->size)
-		return sci_in(port, SCRFDR) & 0xff;
+		return serial_port_in(port, SCRFDR) & 0xff;
 
 	reg = sci_getreg(port, SCFDR);
 	if (reg->size)
-		return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);
+		return serial_port_in(port, SCFDR) & ((port->fifosize << 1) - 1);
 
-	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
 }
 
 /*
@@ -547,14 +544,14 @@
 	unsigned short ctrl;
 	int count;
 
-	status = sci_in(port, SCxSR);
+	status = serial_port_in(port, SCxSR);
 	if (!(status & SCxSR_TDxE(port))) {
-		ctrl = sci_in(port, SCSCR);
+		ctrl = serial_port_in(port, SCSCR);
 		if (uart_circ_empty(xmit))
 			ctrl &= ~SCSCR_TIE;
 		else
 			ctrl |= SCSCR_TIE;
-		sci_out(port, SCSCR, ctrl);
+		serial_port_out(port, SCSCR, ctrl);
 		return;
 	}
 
@@ -573,27 +570,27 @@
 			break;
 		}
 
-		sci_out(port, SCxTDR, c);
+		serial_port_out(port, SCxTDR, c);
 
 		port->icount.tx++;
 	} while (--count > 0);
 
-	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
+	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 	if (uart_circ_empty(xmit)) {
 		sci_stop_tx(port);
 	} else {
-		ctrl = sci_in(port, SCSCR);
+		ctrl = serial_port_in(port, SCSCR);
 
 		if (port->type != PORT_SCI) {
-			sci_in(port, SCxSR); /* Dummy read */
-			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
+			serial_port_in(port, SCxSR); /* Dummy read */
+			serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
 		}
 
 		ctrl |= SCSCR_TIE;
-		sci_out(port, SCSCR, ctrl);
+		serial_port_out(port, SCSCR, ctrl);
 	}
 }
 
@@ -608,7 +605,7 @@
 	unsigned short status;
 	unsigned char flag;
 
-	status = sci_in(port, SCxSR);
+	status = serial_port_in(port, SCxSR);
 	if (!(status & SCxSR_RDxF(port)))
 		return;
 
@@ -621,7 +618,7 @@
 			break;
 
 		if (port->type == PORT_SCI) {
-			char c = sci_in(port, SCxRDR);
+			char c = serial_port_in(port, SCxRDR);
 			if (uart_handle_sysrq_char(port, c) ||
 			    sci_port->break_flag)
 				count = 0;
@@ -629,9 +626,9 @@
 				tty_insert_flip_char(tty, c, TTY_NORMAL);
 		} else {
 			for (i = 0; i < count; i++) {
-				char c = sci_in(port, SCxRDR);
+				char c = serial_port_in(port, SCxRDR);
 
-				status = sci_in(port, SCxSR);
+				status = serial_port_in(port, SCxSR);
 #if defined(CONFIG_CPU_SH3)
 				/* Skip "chars" during break */
 				if (sci_port->break_flag) {
@@ -672,8 +669,8 @@
 			}
 		}
 
-		sci_in(port, SCxSR); /* dummy read */
-		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+		serial_port_in(port, SCxSR); /* dummy read */
+		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 
 		copied += count;
 		port->icount.rx += count;
@@ -683,8 +680,8 @@
 		/* Tell the rest of the system the news. New characters! */
 		tty_flip_buffer_push(tty);
 	} else {
-		sci_in(port, SCxSR); /* dummy read */
-		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+		serial_port_in(port, SCxSR); /* dummy read */
+		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 	}
 }
 
@@ -726,7 +723,7 @@
 static int sci_handle_errors(struct uart_port *port)
 {
 	int copied = 0;
-	unsigned short status = sci_in(port, SCxSR);
+	unsigned short status = serial_port_in(port, SCxSR);
 	struct tty_struct *tty = port->state->port.tty;
 	struct sci_port *s = to_sci_port(port);
 
@@ -804,8 +801,8 @@
 	if (!reg->size)
 		return 0;
 
-	if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
-		sci_out(port, SCLSR, 0);
+	if ((serial_port_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
+		serial_port_out(port, SCLSR, 0);
 
 		port->icount.overrun++;
 
@@ -822,7 +819,7 @@
 static int sci_handle_breaks(struct uart_port *port)
 {
 	int copied = 0;
-	unsigned short status = sci_in(port, SCxSR);
+	unsigned short status = serial_port_in(port, SCxSR);
 	struct tty_struct *tty = port->state->port.tty;
 	struct sci_port *s = to_sci_port(port);
 
@@ -859,8 +856,8 @@
 	struct sci_port *s = to_sci_port(port);
 
 	if (s->chan_rx) {
-		u16 scr = sci_in(port, SCSCR);
-		u16 ssr = sci_in(port, SCxSR);
+		u16 scr = serial_port_in(port, SCSCR);
+		u16 ssr = serial_port_in(port, SCxSR);
 
 		/* Disable future Rx interrupts */
 		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
@@ -869,9 +866,9 @@
 		} else {
 			scr &= ~SCSCR_RIE;
 		}
-		sci_out(port, SCSCR, scr);
+		serial_port_out(port, SCSCR, scr);
 		/* Clear current interrupt */
-		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
+		serial_port_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
 		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
 			jiffies, s->rx_timeout);
 		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
@@ -909,15 +906,15 @@
 	if (port->type == PORT_SCI) {
 		if (sci_handle_errors(port)) {
 			/* discard character in rx buffer */
-			sci_in(port, SCxSR);
-			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+			serial_port_in(port, SCxSR);
+			serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 		}
 	} else {
 		sci_handle_fifo_overrun(port);
 		sci_rx_interrupt(irq, ptr);
 	}
 
-	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
+	serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
 
 	/* Kick the transmission */
 	sci_tx_interrupt(irq, ptr);
@@ -931,7 +928,7 @@
 
 	/* Handle BREAKs */
 	sci_handle_breaks(port);
-	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
+	serial_port_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
 
 	return IRQ_HANDLED;
 }
@@ -955,8 +952,8 @@
 	struct sci_port *s = to_sci_port(port);
 	irqreturn_t ret = IRQ_NONE;
 
-	ssr_status = sci_in(port, SCxSR);
-	scr_status = sci_in(port, SCSCR);
+	ssr_status = serial_port_in(port, SCxSR);
+	scr_status = serial_port_in(port, SCSCR);
 	err_enabled = scr_status & port_rx_irq_mask(port);
 
 	/* Tx Interrupt */
@@ -1170,7 +1167,7 @@
 
 static unsigned int sci_tx_empty(struct uart_port *port)
 {
-	unsigned short status = sci_in(port, SCxSR);
+	unsigned short status = serial_port_in(port, SCxSR);
 	unsigned short in_tx_fifo = sci_txfill(port);
 
 	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
@@ -1198,7 +1195,7 @@
 		 */
 		reg = sci_getreg(port, SCFCR);
 		if (reg->size)
-			sci_out(port, SCFCR, sci_in(port, SCFCR) | 1);
+			serial_port_out(port, SCFCR, serial_port_in(port, SCFCR) | 1);
 	}
 }
 
@@ -1240,8 +1237,8 @@
 	} else {
 		s->cookie_tx = -EINVAL;
 		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
-			u16 ctrl = sci_in(port, SCSCR);
-			sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+			u16 ctrl = serial_port_in(port, SCSCR);
+			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
 		}
 	}
 
@@ -1494,13 +1491,13 @@
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
-		u16 new, scr = sci_in(port, SCSCR);
+		u16 new, scr = serial_port_in(port, SCSCR);
 		if (s->chan_tx)
 			new = scr | 0x8000;
 		else
 			new = scr & ~0x8000;
 		if (new != scr)
-			sci_out(port, SCSCR, new);
+			serial_port_out(port, SCSCR, new);
 	}
 
 	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
@@ -1512,8 +1509,8 @@
 
 	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
-		ctrl = sci_in(port, SCSCR);
-		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
+		ctrl = serial_port_in(port, SCSCR);
+		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
 	}
 }
 
@@ -1522,40 +1519,40 @@
 	unsigned short ctrl;
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
-	ctrl = sci_in(port, SCSCR);
+	ctrl = serial_port_in(port, SCSCR);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x8000;
 
 	ctrl &= ~SCSCR_TIE;
 
-	sci_out(port, SCSCR, ctrl);
+	serial_port_out(port, SCSCR, ctrl);
 }
 
 static void sci_start_rx(struct uart_port *port)
 {
 	unsigned short ctrl;
 
-	ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
+	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
 
-	sci_out(port, SCSCR, ctrl);
+	serial_port_out(port, SCSCR, ctrl);
 }
 
 static void sci_stop_rx(struct uart_port *port)
 {
 	unsigned short ctrl;
 
-	ctrl = sci_in(port, SCSCR);
+	ctrl = serial_port_in(port, SCSCR);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
 
 	ctrl &= ~port_rx_irq_mask(port);
 
-	sci_out(port, SCSCR, ctrl);
+	serial_port_out(port, SCSCR, ctrl);
 }
 
 static void sci_enable_ms(struct uart_port *port)
@@ -1589,13 +1586,13 @@
 {
 	struct sci_port *s = (struct sci_port *)arg;
 	struct uart_port *port = &s->port;
-	u16 scr = sci_in(port, SCSCR);
+	u16 scr = serial_port_in(port, SCSCR);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		scr &= ~0x4000;
 		enable_irq(s->cfg->irqs[1]);
 	}
-	sci_out(port, SCSCR, scr | SCSCR_RIE);
+	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
 }
@@ -1776,14 +1773,14 @@
 	unsigned int status;
 
 	do {
-		status = sci_in(port, SCxSR);
+		status = serial_port_in(port, SCxSR);
 	} while (!(status & SCxSR_TEND(port)));
 
-	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
+	serial_port_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
 
 	reg = sci_getreg(port, SCFCR);
 	if (reg->size)
-		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
 }
 
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -1812,7 +1809,7 @@
 
 	sci_reset(port);
 
-	smr_val = sci_in(port, SCSMR) & 3;
+	smr_val = serial_port_in(port, SCSMR) & 3;
 
 	if ((termios->c_cflag & CSIZE) == CS7)
 		smr_val |= 0x40;
@@ -1825,19 +1822,19 @@
 
 	uart_update_timeout(port, termios->c_cflag, baud);
 
-	sci_out(port, SCSMR, smr_val);
+	serial_port_out(port, SCSMR, smr_val);
 
 	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
 		s->cfg->scscr);
 
 	if (t > 0) {
 		if (t >= 256) {
-			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
+			serial_port_out(port, SCSMR, (serial_port_in(port, SCSMR) & ~3) | 1);
 			t >>= 2;
 		} else
-			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);
+			serial_port_out(port, SCSMR, serial_port_in(port, SCSMR) & ~3);
 
-		sci_out(port, SCBRR, t);
+		serial_port_out(port, SCBRR, t);
 		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
 	}
 
@@ -1845,7 +1842,7 @@
 
 	reg = sci_getreg(port, SCFCR);
 	if (reg->size) {
-		unsigned short ctrl = sci_in(port, SCFCR);
+		unsigned short ctrl = serial_port_in(port, SCFCR);
 
 		if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
 			if (termios->c_cflag & CRTSCTS)
@@ -1861,10 +1858,10 @@
 		 */
 		ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
 
-		sci_out(port, SCFCR, ctrl);
+		serial_port_out(port, SCFCR, ctrl);
 	}
 
-	sci_out(port, SCSCR, s->cfg->scscr);
+	serial_port_out(port, SCSCR, s->cfg->scscr);
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	/*
@@ -2166,7 +2163,7 @@
 
 	/* wait until fifo is empty and last bit has been transmitted */
 	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
-	while ((sci_in(port, SCxSR) & bits) != bits)
+	while ((serial_port_in(port, SCxSR) & bits) != bits)
 		cpu_relax();
 
 	sci_port_disable(sci_port);
@@ -2260,12 +2257,12 @@
 	if (uart_console(port)) {
 		struct plat_sci_reg *reg;
 
-		sci_port->saved_smr = sci_in(port, SCSMR);
-		sci_port->saved_brr = sci_in(port, SCBRR);
+		sci_port->saved_smr = serial_port_in(port, SCSMR);
+		sci_port->saved_brr = serial_port_in(port, SCBRR);
 
 		reg = sci_getreg(port, SCFCR);
 		if (reg->size)
-			sci_port->saved_fcr = sci_in(port, SCFCR);
+			sci_port->saved_fcr = serial_port_in(port, SCFCR);
 		else
 			sci_port->saved_fcr = 0;
 	}
@@ -2279,13 +2276,13 @@
 
 	if (uart_console(port)) {
 		sci_reset(port);
-		sci_out(port, SCSMR, sci_port->saved_smr);
-		sci_out(port, SCBRR, sci_port->saved_brr);
+		serial_port_out(port, SCSMR, sci_port->saved_smr);
+		serial_port_out(port, SCBRR, sci_port->saved_brr);
 
 		if (sci_port->saved_fcr)
-			sci_out(port, SCFCR, sci_port->saved_fcr);
+			serial_port_out(port, SCFCR, sci_port->saved_fcr);
 
-		sci_out(port, SCSCR, sci_port->cfg->scscr);
+		serial_port_out(port, SCSCR, sci_port->cfg->scscr);
 	}
 	return 0;
 }
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index a1a2d36..4c22a15 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -20,10 +20,10 @@
     defined(CONFIG_ARCH_SH7372) || \
     defined(CONFIG_ARCH_R8A7740)
 
-# define SCxSR_RDxF_CLEAR(port)	 (sci_in(port, SCxSR) & 0xfffc)
-# define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73)
-# define SCxSR_TDxE_CLEAR(port)	 (sci_in(port, SCxSR) & 0xffdf)
-# define SCxSR_BREAK_CLEAR(port) (sci_in(port, SCxSR) & 0xffe3)
+# define SCxSR_RDxF_CLEAR(port)	 (serial_port_in(port, SCxSR) & 0xfffc)
+# define SCxSR_ERROR_CLEAR(port) (serial_port_in(port, SCxSR) & 0xfd73)
+# define SCxSR_TDxE_CLEAR(port)	 (serial_port_in(port, SCxSR) & 0xffdf)
+# define SCxSR_BREAK_CLEAR(port) (serial_port_in(port, SCxSR) & 0xffe3)
 #else
 # define SCxSR_RDxF_CLEAR(port)	 (((port)->type == PORT_SCI) ? 0xbc : 0x00fc)
 # define SCxSR_ERROR_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x0073)
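
The sh-sci conversion above swaps the driver-local sci_in()/sci_out() macros for the generic serial_port_in()/serial_port_out() accessors from <linux/serial_core.h>; their presumed definitions simply mirror the removed macros:

	/* Presumed definitions from <linux/serial_core.h>, shown for reference. */
	static inline int serial_port_in(struct uart_port *up, int offset)
	{
		return up->serial_in(up, offset);
	}

	static inline void serial_port_out(struct uart_port *up, int offset, int value)
	{
		up->serial_out(up, offset, value);
	}
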
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index b3b70b0..babd947 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1581,7 +1581,7 @@
 	if (err)
 		goto out_unregister_uart;
 
-	if (!zilog_irq) {
+	if (zilog_irq) {
 		struct uart_sunzilog_port *up = sunzilog_irq_chain;
 		err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
 				  "zs", sunzilog_irq_chain);
@@ -1622,7 +1622,7 @@
 {
 	platform_driver_unregister(&zs_driver);
 
-	if (!zilog_irq) {
+	if (zilog_irq) {
 		struct uart_sunzilog_port *up = sunzilog_irq_chain;
 
 		/* Disable Interrupts */
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 136e86f..05728894 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -327,7 +327,7 @@
 		if (is_global_init(p))
 			continue;
 
-		force_sig(sig, p);
+		do_send_sig_info(sig, SEND_SIG_FORCED, p, true);
 	}
 	read_unlock(&tasklist_lock);
 }
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index cefa0c8..d2b9af5 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -428,18 +428,10 @@
 	return retval;
 }
 
-static int default_open (struct inode *inode, struct file *file)
-{
-	if (inode->i_private)
-		file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static const struct file_operations default_file_operations = {
 	.read =		default_read_file,
 	.write =	default_write_file,
-	.open =		default_open,
+	.open =		simple_open,
 	.llseek =	default_file_lseek,
 };
 
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 85a5ceb..965a629 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -345,7 +345,7 @@
 		}
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-				skb->len <= 1, req->actual, req->actual);
+				skb->len <= 1, req->actual, PAGE_SIZE);
 		page = NULL;
 
 		if (req->actual < req->length) { /* Last fragment */
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 19f318a..cf14c95 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -13,6 +13,7 @@
 
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 
 /* interface and function clocks */
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index fd9109d..680e1a3 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -352,7 +352,6 @@
 static int debug_periodic_open(struct inode *, struct file *);
 static int debug_registers_open(struct inode *, struct file *);
 static int debug_async_open(struct inode *, struct file *);
-static int debug_lpm_open(struct inode *, struct file *);
 static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
 				   size_t count, loff_t *ppos);
 static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
@@ -385,7 +384,7 @@
 };
 static const struct file_operations debug_lpm_fops = {
 	.owner		= THIS_MODULE,
-	.open		= debug_lpm_open,
+	.open		= simple_open,
 	.read		= debug_lpm_read,
 	.write		= debug_lpm_write,
 	.release	= debug_lpm_close,
@@ -970,12 +969,6 @@
 	return file->private_data ? 0 : -ENOMEM;
 }
 
-static int debug_lpm_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static int debug_lpm_close(struct inode *inode, struct file *file)
 {
 	return 0;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index db8963f..09f597a 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -27,6 +27,10 @@
 #error "CONFIG_ARCH_AT91 must be defined."
 #endif
 
+#define valid_port(index)	((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
+#define at91_for_each_port(index)	\
+		for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)
+
 /* interface and function clocks; sometimes also an AHB clock */
 static struct clk *iclk, *fclk, *hclk;
 static int clocked;
@@ -240,26 +244,26 @@
 
 static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
 {
-	if (port < 0 || port >= 2)
+	if (!valid_port(port))
 		return;
 
 	if (!gpio_is_valid(pdata->vbus_pin[port]))
 		return;
 
 	gpio_set_value(pdata->vbus_pin[port],
-		       !pdata->vbus_pin_active_low[port] ^ enable);
+		       pdata->vbus_pin_active_low[port] ^ enable);
 }
 
 static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
 {
-	if (port < 0 || port >= 2)
+	if (!valid_port(port))
 		return -EINVAL;
 
 	if (!gpio_is_valid(pdata->vbus_pin[port]))
 		return -EINVAL;
 
 	return gpio_get_value(pdata->vbus_pin[port]) ^
-		!pdata->vbus_pin_active_low[port];
+		pdata->vbus_pin_active_low[port];
 }
 
 /*
@@ -271,9 +275,9 @@
 	int length = ohci_hub_status_data(hcd, buf);
 	int port;
 
-	for (port = 0; port < ARRAY_SIZE(pdata->overcurrent_pin); port++) {
+	at91_for_each_port(port) {
 		if (pdata->overcurrent_changed[port]) {
-			if (! length)
+			if (!length)
 				length = 1;
 			buf[0] |= 1 << (port + 1);
 		}
@@ -297,11 +301,17 @@
 		"ohci_at91_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
 		hcd, typeReq, wValue, wIndex, buf, wLength);
 
+	wIndex--;
+
 	switch (typeReq) {
 	case SetPortFeature:
 		if (wValue == USB_PORT_FEAT_POWER) {
 			dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
-			ohci_at91_usb_set_power(pdata, wIndex - 1, 1);
+			if (valid_port(wIndex)) {
+				ohci_at91_usb_set_power(pdata, wIndex, 1);
+				ret = 0;
+			}
+
 			goto out;
 		}
 		break;
@@ -312,9 +322,9 @@
 			dev_dbg(hcd->self.controller,
 				"ClearPortFeature: C_OVER_CURRENT\n");
 
-			if (wIndex == 1 || wIndex == 2) {
-				pdata->overcurrent_changed[wIndex-1] = 0;
-				pdata->overcurrent_status[wIndex-1] = 0;
+			if (valid_port(wIndex)) {
+				pdata->overcurrent_changed[wIndex] = 0;
+				pdata->overcurrent_status[wIndex] = 0;
 			}
 
 			goto out;
@@ -323,9 +333,8 @@
 			dev_dbg(hcd->self.controller,
 				"ClearPortFeature: OVER_CURRENT\n");
 
-			if (wIndex == 1 || wIndex == 2) {
-				pdata->overcurrent_status[wIndex-1] = 0;
-			}
+			if (valid_port(wIndex))
+				pdata->overcurrent_status[wIndex] = 0;
 
 			goto out;
 
@@ -333,15 +342,15 @@
 			dev_dbg(hcd->self.controller,
 				"ClearPortFeature: POWER\n");
 
-			if (wIndex == 1 || wIndex == 2) {
-				ohci_at91_usb_set_power(pdata, wIndex - 1, 0);
+			if (valid_port(wIndex)) {
+				ohci_at91_usb_set_power(pdata, wIndex, 0);
 				return 0;
 			}
 		}
 		break;
 	}
 
-	ret = ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+	ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
 	if (ret)
 		goto out;
 
@@ -377,18 +386,15 @@
 
 		dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
 
-		if (wIndex == 1 || wIndex == 2) {
-			if (! ohci_at91_usb_get_power(pdata, wIndex-1)) {
+		if (valid_port(wIndex)) {
+			if (!ohci_at91_usb_get_power(pdata, wIndex))
 				*data &= ~cpu_to_le32(RH_PS_PPS);
-			}
 
-			if (pdata->overcurrent_changed[wIndex-1]) {
+			if (pdata->overcurrent_changed[wIndex])
 				*data |= cpu_to_le32(RH_PS_OCIC);
-			}
 
-			if (pdata->overcurrent_status[wIndex-1]) {
+			if (pdata->overcurrent_status[wIndex])
 				*data |= cpu_to_le32(RH_PS_POCI);
-			}
 		}
 	}
 
@@ -450,14 +456,14 @@
 
 	/* From the GPIO notifying the over-current situation, find
 	 * out the corresponding port */
-	for (port = 0; port < ARRAY_SIZE(pdata->overcurrent_pin); port++) {
+	at91_for_each_port(port) {
 		if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
 			gpio = pdata->overcurrent_pin[port];
 			break;
 		}
 	}
 
-	if (port == ARRAY_SIZE(pdata->overcurrent_pin)) {
+	if (port == AT91_MAX_USBH_PORTS) {
 		dev_err(& pdev->dev, "overcurrent interrupt from unknown GPIO\n");
 		return IRQ_HANDLED;
 	}
@@ -467,7 +473,7 @@
 	/* When notified of an over-current situation, disable power
 	   on the corresponding port, and mark this port in
 	   over-current. */
-	if (! val) {
+	if (!val) {
 		ohci_at91_usb_set_power(pdata, port, 0);
 		pdata->overcurrent_status[port]  = 1;
 		pdata->overcurrent_changed[port] = 1;
@@ -492,7 +498,7 @@
 static int __devinit ohci_at91_of_init(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	int i, ret, gpio;
+	int i, gpio;
 	enum of_gpio_flags flags;
 	struct at91_usbh_data	*pdata;
 	u32 ports;
@@ -514,48 +520,17 @@
 	if (!of_property_read_u32(np, "num-ports", &ports))
 		pdata->ports = ports;
 
-	for (i = 0; i < 2; i++) {
+	at91_for_each_port(i) {
 		gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, &flags);
 		pdata->vbus_pin[i] = gpio;
 		if (!gpio_is_valid(gpio))
 			continue;
 		pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
-		ret = gpio_request(gpio, "ohci_vbus");
-		if (ret) {
-			dev_warn(&pdev->dev, "can't request vbus gpio %d", gpio);
-			continue;
-		}
-		ret = gpio_direction_output(gpio, !(flags & OF_GPIO_ACTIVE_LOW) ^ 1);
-		if (ret)
-			dev_warn(&pdev->dev, "can't put vbus gpio %d as output %d",
-				 !(flags & OF_GPIO_ACTIVE_LOW) ^ 1, gpio);
 	}
 
-	for (i = 0; i < 2; i++) {
-		gpio = of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
-		pdata->overcurrent_pin[i] = gpio;
-		if (!gpio_is_valid(gpio))
-			continue;
-		ret = gpio_request(gpio, "ohci_overcurrent");
-		if (ret) {
-			dev_err(&pdev->dev, "can't request overcurrent gpio %d", gpio);
-			continue;
-		}
-
-		ret = gpio_direction_input(gpio);
-		if (ret) {
-			dev_err(&pdev->dev, "can't configure overcurrent gpio %d as input", gpio);
-			continue;
-		}
-
-		ret = request_irq(gpio_to_irq(gpio),
-				  ohci_hcd_at91_overcurrent_irq,
-				  IRQF_SHARED, "ohci_overcurrent", pdev);
-		if (ret) {
-			gpio_free(gpio);
-			dev_warn(& pdev->dev, "cannot get GPIO IRQ for overcurrent\n");
-		}
-	}
+	at91_for_each_port(i)
+		pdata->overcurrent_pin[i] =
+			of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
 
 	pdev->dev.platform_data = pdata;
 
@@ -574,35 +549,69 @@
 {
 	struct at91_usbh_data	*pdata;
 	int			i;
+	int			gpio;
+	int			ret;
 
-	i = ohci_at91_of_init(pdev);
-
-	if (i)
-		return i;
+	ret = ohci_at91_of_init(pdev);
+	if (ret)
+		return ret;
 
 	pdata = pdev->dev.platform_data;
 
 	if (pdata) {
-		for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
+		at91_for_each_port(i) {
 			if (!gpio_is_valid(pdata->vbus_pin[i]))
 				continue;
-			gpio_request(pdata->vbus_pin[i], "ohci_vbus");
+			gpio = pdata->vbus_pin[i];
+
+			ret = gpio_request(gpio, "ohci_vbus");
+			if (ret) {
+				dev_err(&pdev->dev,
+					"can't request vbus gpio %d\n", gpio);
+				continue;
+			}
+			ret = gpio_direction_output(gpio,
+						!pdata->vbus_pin_active_low[i]);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"can't put vbus gpio %d as output %d\n",
+					gpio, !pdata->vbus_pin_active_low[i]);
+				gpio_free(gpio);
+				continue;
+			}
+
 			ohci_at91_usb_set_power(pdata, i, 1);
 		}
 
-		for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
-			int ret;
-
+		at91_for_each_port(i) {
 			if (!gpio_is_valid(pdata->overcurrent_pin[i]))
 				continue;
-			gpio_request(pdata->overcurrent_pin[i], "ohci_overcurrent");
+			gpio = pdata->overcurrent_pin[i];
 
-			ret = request_irq(gpio_to_irq(pdata->overcurrent_pin[i]),
+			ret = gpio_request(gpio, "ohci_overcurrent");
+			if (ret) {
+				dev_err(&pdev->dev,
+					"can't request overcurrent gpio %d\n",
+					gpio);
+				continue;
+			}
+
+			ret = gpio_direction_input(gpio);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"can't configure overcurrent gpio %d as input\n",
+					gpio);
+				gpio_free(gpio);
+				continue;
+			}
+
+			ret = request_irq(gpio_to_irq(gpio),
 					  ohci_hcd_at91_overcurrent_irq,
 					  IRQF_SHARED, "ohci_overcurrent", pdev);
 			if (ret) {
-				gpio_free(pdata->overcurrent_pin[i]);
-				dev_warn(& pdev->dev, "cannot get GPIO IRQ for overcurrent\n");
+				gpio_free(gpio);
+				dev_err(&pdev->dev,
+					"can't get gpio IRQ for overcurrent\n");
 			}
 		}
 	}
@@ -617,14 +626,14 @@
 	int			i;
 
 	if (pdata) {
-		for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
+		at91_for_each_port(i) {
 			if (!gpio_is_valid(pdata->vbus_pin[i]))
 				continue;
 			ohci_at91_usb_set_power(pdata, i, 0);
 			gpio_free(pdata->vbus_pin[i]);
 		}
 
-		for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
+		at91_for_each_port(i) {
 			if (!gpio_is_valid(pdata->overcurrent_pin[i]))
 				continue;
 			free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7c229d3..ff8605b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1724,7 +1724,8 @@
 
 /*
  * Module parameter to control latency timer for NDI FTDI-based USB devices.
- * If this value is not set in modprobe.conf.local its value will be set to 1ms.
+ * If this value is not set in /etc/modprobe.d/, its value will be set
+ * to 1ms.
  */
 static int ndi_latency_timer = 1;
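For reference, such an override would normally live in a drop-in file under
/etc/modprobe.d/; the file name and the value below are only illustrative:

	# /etc/modprobe.d/ftdi_sio.conf (illustrative file name)
	options ftdi_sio ndi_latency_timer=5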
 
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index fe2d803..7691c86 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -222,7 +222,7 @@
 	  for usb-storage and ub drivers, and allows to switch binding
 	  of these devices without rebuilding modules.
 
-	  Typical syntax of /etc/modprobe.conf is:
+	  Typical syntax of /etc/modprobe.d/*.conf is:
 
 		options libusual bias="ub"
 
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
index 2eecec0..6ec45be 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/uwb/uwb-debug.c
@@ -159,13 +159,6 @@
 	return uwb_rc_ie_rm(rc, ie_to_rm->data[0]);
 }
 
-static int command_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static ssize_t command_write(struct file *file, const char __user *buf,
 			 size_t len, loff_t *off)
 {
@@ -206,7 +199,7 @@
 }
 
 static const struct file_operations command_fops = {
-	.open   = command_open,
+	.open	= simple_open,
 	.write  = command_write,
 	.read   = NULL,
 	.llseek = no_llseek,
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 7ed9991..af16884 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -245,6 +245,12 @@
 	  If you have a LCD backlight connected to the WLED output of DA9030
 	  or DA9034 WLED output, say Y here to enable this driver.
 
+config BACKLIGHT_DA9052
+	tristate "Dialog DA9052/DA9053 WLED"
+	depends on PMIC_DA9052
+	help
+	  Enable the Backlight Driver for DA9052-BC and DA9053-AA/Bx PMICs.
+
 config BACKLIGHT_MAX8925
 	tristate "Backlight driver for MAX8925"
 	depends on MFD_MAX8925
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 8071eb6..36855ae 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
 obj-$(CONFIG_BACKLIGHT_PWM)	+= pwm_bl.o
 obj-$(CONFIG_BACKLIGHT_DA903X)	+= da903x_bl.o
+obj-$(CONFIG_BACKLIGHT_DA9052)	+= da9052_bl.o
 obj-$(CONFIG_BACKLIGHT_MAX8925)	+= max8925_bl.o
 obj-$(CONFIG_BACKLIGHT_APPLE)	+= apple_bl.o
 obj-$(CONFIG_BACKLIGHT_TOSA)	+= tosa_bl.o
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
new file mode 100644
index 0000000..b628d68
--- /dev/null
+++ b/drivers/video/backlight/da9052_bl.c
@@ -0,0 +1,187 @@
+/*
+ * Backlight Driver for Dialog DA9052 PMICs
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_MAX_BRIGHTNESS		0xFF
+
+enum {
+	DA9052_WLEDS_OFF,
+	DA9052_WLEDS_ON,
+};
+
+enum {
+	DA9052_TYPE_WLED1,
+	DA9052_TYPE_WLED2,
+	DA9052_TYPE_WLED3,
+};
+
+static unsigned char wled_bank[] = {
+	DA9052_LED1_CONF_REG,
+	DA9052_LED2_CONF_REG,
+	DA9052_LED3_CONF_REG,
+};
+
+struct da9052_bl {
+	struct da9052 *da9052;
+	uint brightness;
+	uint state;
+	uint led_reg;
+};
+
+static int da9052_adjust_wled_brightness(struct da9052_bl *wleds)
+{
+	unsigned char boost_en;
+	unsigned char i_sink;
+	int ret;
+
+	boost_en = 0x3F;
+	i_sink = 0xFF;
+	if (wleds->state == DA9052_WLEDS_OFF) {
+		boost_en = 0x00;
+		i_sink = 0x00;
+	}
+
+	ret = da9052_reg_write(wleds->da9052, DA9052_BOOST_REG, boost_en);
+	if (ret < 0)
+		return ret;
+
+	ret = da9052_reg_write(wleds->da9052, DA9052_LED_CONT_REG, i_sink);
+	if (ret < 0)
+		return ret;
+
+	ret = da9052_reg_write(wleds->da9052, wled_bank[wleds->led_reg], 0x0);
+	if (ret < 0)
+		return ret;
+
+	msleep(10);
+
+	if (wleds->brightness) {
+		ret = da9052_reg_write(wleds->da9052, wled_bank[wleds->led_reg],
+				       wleds->brightness);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int da9052_backlight_update_status(struct backlight_device *bl)
+{
+	int brightness = bl->props.brightness;
+	struct da9052_bl *wleds = bl_get_data(bl);
+
+	wleds->brightness = brightness;
+	wleds->state = DA9052_WLEDS_ON;
+
+	return da9052_adjust_wled_brightness(wleds);
+}
+
+static int da9052_backlight_get_brightness(struct backlight_device *bl)
+{
+	struct da9052_bl *wleds = bl_get_data(bl);
+
+	return wleds->brightness;
+}
+
+static const struct backlight_ops da9052_backlight_ops = {
+	.update_status = da9052_backlight_update_status,
+	.get_brightness = da9052_backlight_get_brightness,
+};
+
+static int da9052_backlight_probe(struct platform_device *pdev)
+{
+	struct backlight_device *bl;
+	struct backlight_properties props;
+	struct da9052_bl *wleds;
+
+	wleds = devm_kzalloc(&pdev->dev, sizeof(struct da9052_bl), GFP_KERNEL);
+	if (!wleds)
+		return -ENOMEM;
+
+	wleds->da9052 = dev_get_drvdata(pdev->dev.parent);
+	wleds->brightness = 0;
+	wleds->led_reg = platform_get_device_id(pdev)->driver_data;
+	wleds->state = DA9052_WLEDS_OFF;
+
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = DA9052_MAX_BRIGHTNESS;
+
+	bl = backlight_device_register(pdev->name, wleds->da9052->dev, wleds,
+				       &da9052_backlight_ops, &props);
+	if (IS_ERR(bl)) {
+		dev_err(&pdev->dev, "Failed to register backlight\n");
+		devm_kfree(&pdev->dev, wleds);
+		return PTR_ERR(bl);
+	}
+
+	bl->props.max_brightness = DA9052_MAX_BRIGHTNESS;
+	bl->props.brightness = 0;
+	platform_set_drvdata(pdev, bl);
+
+	return da9052_adjust_wled_brightness(wleds);
+}
+
+static int da9052_backlight_remove(struct platform_device *pdev)
+{
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+	struct da9052_bl *wleds = bl_get_data(bl);
+
+	wleds->brightness = 0;
+	wleds->state = DA9052_WLEDS_OFF;
+	da9052_adjust_wled_brightness(wleds);
+	backlight_device_unregister(bl);
+	devm_kfree(&pdev->dev, wleds);
+
+	return 0;
+}
+
+static struct platform_device_id da9052_wled_ids[] = {
+	{
+		.name		= "da9052-wled1",
+		.driver_data	= DA9052_TYPE_WLED1,
+	},
+	{
+		.name		= "da9052-wled2",
+		.driver_data	= DA9052_TYPE_WLED2,
+	},
+	{
+		.name		= "da9052-wled3",
+		.driver_data	= DA9052_TYPE_WLED3,
+	},
+	{ },	/* sentinel; platform bus id matching stops at the empty name */
+};
+
+static struct platform_driver da9052_wled_driver = {
+	.probe		= da9052_backlight_probe,
+	.remove		= da9052_backlight_remove,
+	.id_table	= da9052_wled_ids,
+	.driver	= {
+		.name	= "da9052-wled",
+		.owner	= THIS_MODULE,
+	},
+};
+
+module_platform_driver(da9052_wled_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Backlight driver for DA9052 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-backlight");
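The id_table names above ("da9052-wled1/2/3") are matched by the platform bus
against devices that the parent DA9052 MFD core is expected to create. A
minimal sketch of how such cells might be declared in the PMIC core driver,
with only the names taken from da9052_wled_ids and everything else assumed,
is:

	/* needs <linux/mfd/core.h>; illustrative only */
	static struct mfd_cell da9052_wled_cells[] = {
		{ .name = "da9052-wled1" },
		{ .name = "da9052-wled2" },
		{ .name = "da9052-wled3" },
	};
	/* e.g. mfd_add_devices(da9052->dev, -1, da9052_wled_cells,
	 *                      ARRAY_SIZE(da9052_wled_cells), NULL, 0); */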
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index be20b5c..3a6d541 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -229,14 +229,7 @@
 
 static int __init locomolcd_init(void)
 {
-	int ret = locomo_driver_register(&poodle_lcd_driver);
-	if (ret)
-		return ret;
-
-#ifdef CONFIG_SA1100_COLLIE
-	sa1100fb_lcd_power = locomolcd_power;
-#endif
-	return 0;
+	return locomo_driver_register(&poodle_lcd_driver);
 }
 
 static void __exit locomolcd_exit(void)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 958e512..05f0a80 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -398,21 +398,8 @@
 	return 0;
 }
 
-static int virtballoon_thaw(struct virtio_device *vdev)
-{
-	return restore_common(vdev);
-}
-
 static int virtballoon_restore(struct virtio_device *vdev)
 {
-	struct virtio_balloon *vb = vdev->priv;
-
-	/*
-	 * If a request wasn't complete at the time of freezing, this
-	 * could have been set.
-	 */
-	vb->need_stats_update = 0;
-
 	return restore_common(vdev);
 }
 #endif
@@ -434,7 +421,6 @@
 #ifdef CONFIG_PM
 	.freeze	=	virtballoon_freeze,
 	.restore =	virtballoon_restore,
-	.thaw =		virtballoon_thaw,
 #endif
 };
 
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 635e1ef..2e03d41 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -720,24 +720,6 @@
 }
 
 #ifdef CONFIG_PM
-static int virtio_pci_suspend(struct device *dev)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-
-	pci_save_state(pci_dev);
-	pci_set_power_state(pci_dev, PCI_D3hot);
-	return 0;
-}
-
-static int virtio_pci_resume(struct device *dev)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-
-	pci_restore_state(pci_dev);
-	pci_set_power_state(pci_dev, PCI_D0);
-	return 0;
-}
-
 static int virtio_pci_freeze(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -758,47 +740,6 @@
 	return ret;
 }
 
-static int restore_common(struct device *dev)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
-	int ret;
-
-	ret = pci_enable_device(pci_dev);
-	if (ret)
-		return ret;
-	pci_set_master(pci_dev);
-	vp_finalize_features(&vp_dev->vdev);
-
-	return ret;
-}
-
-static int virtio_pci_thaw(struct device *dev)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
-	struct virtio_driver *drv;
-	int ret;
-
-	ret = restore_common(dev);
-	if (ret)
-		return ret;
-
-	drv = container_of(vp_dev->vdev.dev.driver,
-			   struct virtio_driver, driver);
-
-	if (drv && drv->thaw)
-		ret = drv->thaw(&vp_dev->vdev);
-	else if (drv && drv->restore)
-		ret = drv->restore(&vp_dev->vdev);
-
-	/* Finally, tell the device we're all set */
-	if (!ret)
-		vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
-
-	return ret;
-}
-
 static int virtio_pci_restore(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -809,8 +750,14 @@
 	drv = container_of(vp_dev->vdev.dev.driver,
 			   struct virtio_driver, driver);
 
-	ret = restore_common(dev);
-	if (!ret && drv && drv->restore)
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		return ret;
+
+	pci_set_master(pci_dev);
+	vp_finalize_features(&vp_dev->vdev);
+
+	if (drv && drv->restore)
 		ret = drv->restore(&vp_dev->vdev);
 
 	/* Finally, tell the device we're all set */
@@ -821,12 +768,7 @@
 }
 
 static const struct dev_pm_ops virtio_pci_pm_ops = {
-	.suspend	= virtio_pci_suspend,
-	.resume		= virtio_pci_resume,
-	.freeze		= virtio_pci_freeze,
-	.thaw		= virtio_pci_thaw,
-	.restore	= virtio_pci_restore,
-	.poweroff	= virtio_pci_suspend,
+	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
 };
 #endif
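For context, SET_SYSTEM_SLEEP_PM_OPS() from include/linux/pm.h routes one
suspend-side and one resume-side callback into every system-sleep slot; with
CONFIG_PM_SLEEP enabled it expands roughly to:

	.suspend  = virtio_pci_freeze,	.resume   = virtio_pci_restore,
	.freeze   = virtio_pci_freeze,	.thaw     = virtio_pci_restore,
	.poweroff = virtio_pci_freeze,	.restore  = virtio_pci_restore,

which is why the dedicated suspend/resume/thaw handlers removed above are no
longer needed.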
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 19e6a20..1afb4fb 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -204,7 +204,8 @@
 
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flags)
+			   dma_addr_t *dma_handle, gfp_t flags,
+			   struct dma_attrs *attrs)
 {
 	void *ret;
 	int order = get_order(size);
@@ -253,7 +254,7 @@
 
 void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-			  dma_addr_t dev_addr)
+			  dma_addr_t dev_addr, struct dma_attrs *attrs)
 {
 	int order = get_order(size);
 	phys_addr_t phys;
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 63616d7..97f5d26 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -234,7 +234,7 @@
 	if (dev_data)
 		dev_data->ack_intr = 0;
 
-	return result;
+	return result > 0 ? 0 : result;
 }
 
 static
diff --git a/fs/aio.c b/fs/aio.c
index 4f71627..da88760 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -305,15 +305,18 @@
 	return ERR_PTR(err);
 }
 
-/* aio_cancel_all
+/* kill_ctx
  *	Cancels all outstanding aio requests on an aio context.  Used 
  *	when the processes owning a context have all exited to encourage 
  *	the rapid destruction of the kioctx.
  */
-static void aio_cancel_all(struct kioctx *ctx)
+static void kill_ctx(struct kioctx *ctx)
 {
 	int (*cancel)(struct kiocb *, struct io_event *);
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
 	struct io_event res;
+
 	spin_lock_irq(&ctx->ctx_lock);
 	ctx->dead = 1;
 	while (!list_empty(&ctx->active_reqs)) {
@@ -329,15 +332,7 @@
 			spin_lock_irq(&ctx->ctx_lock);
 		}
 	}
-	spin_unlock_irq(&ctx->ctx_lock);
-}
 
-static void wait_for_all_aios(struct kioctx *ctx)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	spin_lock_irq(&ctx->ctx_lock);
 	if (!ctx->reqs_active)
 		goto out;
 
@@ -387,9 +382,7 @@
 		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
 		hlist_del_rcu(&ctx->list);
 
-		aio_cancel_all(ctx);
-
-		wait_for_all_aios(ctx);
+		kill_ctx(ctx);
 
 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
@@ -1269,8 +1262,7 @@
 	if (likely(!was_dead))
 		put_ioctx(ioctx);	/* twice for the list */
 
-	aio_cancel_all(ioctx);
-	wait_for_all_aios(ioctx);
+	kill_ctx(ioctx);
 
 	/*
 	 * Wake up any waiters.  The setting of ctx->dead must be seen
@@ -1278,7 +1270,6 @@
 	 * locking done by the above calls to ensure this consistency.
 	 */
 	wake_up_all(&ioctx->wait);
-	put_ioctx(ioctx);	/* once for the lookup */
 }
 
 /* sys_io_setup:
@@ -1315,11 +1306,9 @@
 	ret = PTR_ERR(ioctx);
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
-		if (!ret) {
-			put_ioctx(ioctx);
-			return 0;
-		}
-		io_destroy(ioctx);
+		if (ret)
+			io_destroy(ioctx);
+		put_ioctx(ioctx);
 	}
 
 out:
@@ -1337,6 +1326,7 @@
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
 		io_destroy(ioctx);
+		put_ioctx(ioctx);
 		return 0;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0cc20b3..4270414 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -171,11 +171,11 @@
 	spin_unlock_irqrestore(&workers->lock, flags);
 }
 
-static noinline int run_ordered_completions(struct btrfs_workers *workers,
+static noinline void run_ordered_completions(struct btrfs_workers *workers,
 					    struct btrfs_work *work)
 {
 	if (!workers->ordered)
-		return 0;
+		return;
 
 	set_bit(WORK_DONE_BIT, &work->flags);
 
@@ -213,7 +213,6 @@
 	}
 
 	spin_unlock(&workers->order_lock);
-	return 0;
 }
 
 static void put_worker(struct btrfs_worker_thread *worker)
@@ -399,7 +398,7 @@
 /*
  * this will wait for all the worker threads to shutdown
  */
-int btrfs_stop_workers(struct btrfs_workers *workers)
+void btrfs_stop_workers(struct btrfs_workers *workers)
 {
 	struct list_head *cur;
 	struct btrfs_worker_thread *worker;
@@ -427,7 +426,6 @@
 		put_worker(worker);
 	}
 	spin_unlock_irq(&workers->lock);
-	return 0;
 }
 
 /*
@@ -615,14 +613,14 @@
  * it was taken from.  It is intended for use with long running work functions
  * that make some progress and want to give the cpu up for others.
  */
-int btrfs_requeue_work(struct btrfs_work *work)
+void btrfs_requeue_work(struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker = work->worker;
 	unsigned long flags;
 	int wake = 0;
 
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-		goto out;
+		return;
 
 	spin_lock_irqsave(&worker->lock, flags);
 	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
@@ -649,9 +647,6 @@
 	if (wake)
 		wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
-out:
-
-	return 0;
 }
 
 void btrfs_set_work_high_prio(struct btrfs_work *work)
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index f34cc31..063698b 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -111,9 +111,9 @@
 
 void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
 int btrfs_start_workers(struct btrfs_workers *workers);
-int btrfs_stop_workers(struct btrfs_workers *workers);
+void btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
 			struct btrfs_workers *async_starter);
-int btrfs_requeue_work(struct btrfs_work *work);
+void btrfs_requeue_work(struct btrfs_work *work);
 void btrfs_set_work_high_prio(struct btrfs_work *work);
 #endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0436c12..f4e9074 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -116,6 +116,7 @@
  * to a logical address
  */
 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+					int search_commit_root,
 					struct __prelim_ref *ref,
 					struct ulist *parents)
 {
@@ -131,6 +132,7 @@
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
+	path->search_commit_root = !!search_commit_root;
 
 	root_key.objectid = ref->root_id;
 	root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -188,6 +190,7 @@
  * resolve all indirect backrefs from the list
  */
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+				   int search_commit_root,
 				   struct list_head *head)
 {
 	int err;
@@ -212,7 +215,8 @@
 			continue;
 		if (ref->count == 0)
 			continue;
-		err = __resolve_indirect_ref(fs_info, ref, parents);
+		err = __resolve_indirect_ref(fs_info, search_commit_root,
+					     ref, parents);
 		if (err) {
 			if (ret == 0)
 				ret = err;
@@ -586,6 +590,7 @@
 	struct btrfs_delayed_ref_head *head;
 	int info_level = 0;
 	int ret;
+	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
 	struct list_head prefs_delayed;
 	struct list_head prefs;
 	struct __prelim_ref *ref;
@@ -600,6 +605,7 @@
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
+	path->search_commit_root = !!search_commit_root;
 
 	/*
 	 * grab both a lock on the path and a lock on the delayed ref head.
@@ -614,35 +620,39 @@
 		goto out;
 	BUG_ON(ret == 0);
 
-	/*
-	 * look if there are updates for this ref queued and lock the head
-	 */
-	delayed_refs = &trans->transaction->delayed_refs;
-	spin_lock(&delayed_refs->lock);
-	head = btrfs_find_delayed_ref_head(trans, bytenr);
-	if (head) {
-		if (!mutex_trylock(&head->mutex)) {
-			atomic_inc(&head->node.refs);
-			spin_unlock(&delayed_refs->lock);
+	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
+		/*
+		 * look if there are updates for this ref queued and lock the
+		 * head
+		 */
+		delayed_refs = &trans->transaction->delayed_refs;
+		spin_lock(&delayed_refs->lock);
+		head = btrfs_find_delayed_ref_head(trans, bytenr);
+		if (head) {
+			if (!mutex_trylock(&head->mutex)) {
+				atomic_inc(&head->node.refs);
+				spin_unlock(&delayed_refs->lock);
 
-			btrfs_release_path(path);
+				btrfs_release_path(path);
 
-			/*
-			 * Mutex was contended, block until it's
-			 * released and try again
-			 */
-			mutex_lock(&head->mutex);
-			mutex_unlock(&head->mutex);
-			btrfs_put_delayed_ref(&head->node);
-			goto again;
+				/*
+				 * Mutex was contended, block until it's
+				 * released and try again
+				 */
+				mutex_lock(&head->mutex);
+				mutex_unlock(&head->mutex);
+				btrfs_put_delayed_ref(&head->node);
+				goto again;
+			}
+			ret = __add_delayed_refs(head, seq, &info_key,
+						 &prefs_delayed);
+			if (ret) {
+				spin_unlock(&delayed_refs->lock);
+				goto out;
+			}
 		}
-		ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
-		if (ret) {
-			spin_unlock(&delayed_refs->lock);
-			goto out;
-		}
+		spin_unlock(&delayed_refs->lock);
 	}
-	spin_unlock(&delayed_refs->lock);
 
 	if (path->slots[0]) {
 		struct extent_buffer *leaf;
@@ -679,7 +689,7 @@
 	if (ret)
 		goto out;
 
-	ret = __resolve_indirect_refs(fs_info, &prefs);
+	ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
 	if (ret)
 		goto out;
 
@@ -1074,8 +1084,7 @@
 	return 0;
 }
 
-static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
-				struct btrfs_path *path, u64 logical,
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
 				u64 orig_extent_item_objectid,
 				u64 extent_item_pos, u64 root,
 				iterate_extent_inodes_t *iterate, void *ctx)
@@ -1143,35 +1152,38 @@
  * calls iterate() for every inode that references the extent identified by
  * the given parameters.
  * when the iterator function returns a non-zero value, iteration stops.
- * path is guaranteed to be in released state when iterate() is called.
  */
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
-				struct btrfs_path *path,
 				u64 extent_item_objectid, u64 extent_item_pos,
+				int search_commit_root,
 				iterate_extent_inodes_t *iterate, void *ctx)
 {
 	int ret;
 	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
 	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
 	struct btrfs_trans_handle *trans;
-	struct ulist *refs;
-	struct ulist *roots;
+	struct ulist *refs = NULL;
+	struct ulist *roots = NULL;
 	struct ulist_node *ref_node = NULL;
 	struct ulist_node *root_node = NULL;
 	struct seq_list seq_elem;
-	struct btrfs_delayed_ref_root *delayed_refs;
-
-	trans = btrfs_join_transaction(fs_info->extent_root);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
+	struct btrfs_delayed_ref_root *delayed_refs = NULL;
 
 	pr_debug("resolving all inodes for extent %llu\n",
 			extent_item_objectid);
 
-	delayed_refs = &trans->transaction->delayed_refs;
-	spin_lock(&delayed_refs->lock);
-	btrfs_get_delayed_seq(delayed_refs, &seq_elem);
-	spin_unlock(&delayed_refs->lock);
+	if (search_commit_root) {
+		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
+	} else {
+		trans = btrfs_join_transaction(fs_info->extent_root);
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
+
+		delayed_refs = &trans->transaction->delayed_refs;
+		spin_lock(&delayed_refs->lock);
+		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
+		spin_unlock(&delayed_refs->lock);
+	}
 
 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
 				   extent_item_pos, seq_elem.seq,
@@ -1188,7 +1200,7 @@
 		while (!ret && (root_node = ulist_next(roots, root_node))) {
 			pr_debug("root %llu references leaf %llu\n",
 					root_node->val, ref_node->val);
-			ret = iterate_leaf_refs(fs_info, path, ref_node->val,
+			ret = iterate_leaf_refs(fs_info, ref_node->val,
 						extent_item_objectid,
 						extent_item_pos, root_node->val,
 						iterate, ctx);
@@ -1198,8 +1210,11 @@
 	ulist_free(refs);
 	ulist_free(roots);
 out:
-	btrfs_put_delayed_seq(delayed_refs, &seq_elem);
-	btrfs_end_transaction(trans, fs_info->extent_root);
+	if (!search_commit_root) {
+		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
+		btrfs_end_transaction(trans, fs_info->extent_root);
+	}
+
 	return ret;
 }
 
@@ -1210,6 +1225,7 @@
 	int ret;
 	u64 extent_item_pos;
 	struct btrfs_key found_key;
+	int search_commit_root = path->search_commit_root;
 
 	ret = extent_from_logical(fs_info, logical, path,
 					&found_key);
@@ -1220,8 +1236,9 @@
 		return ret;
 
 	extent_item_pos = logical - found_key.objectid;
-	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
-					extent_item_pos, iterate, ctx);
+	ret = iterate_extent_inodes(fs_info, found_key.objectid,
+					extent_item_pos, search_commit_root,
+					iterate, ctx);
 
 	return ret;
 }
@@ -1342,12 +1359,6 @@
 				inode_to_path, ipath);
 }
 
-/*
- * allocates space to return multiple file system paths for an inode.
- * total_bytes to allocate are passed, note that space usable for actual path
- * information will be total_bytes - sizeof(struct inode_fs_paths).
- * the returned pointer must be freed with free_ipath() in the end.
- */
 struct btrfs_data_container *init_data_container(u32 total_bytes)
 {
 	struct btrfs_data_container *data;
@@ -1403,5 +1414,6 @@
 
 void free_ipath(struct inode_fs_paths *ipath)
 {
+	kfree(ipath->fspath);
 	kfree(ipath);
 }
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index d00dfa9..57ea2e9 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -22,6 +22,8 @@
 #include "ioctl.h"
 #include "ulist.h"
 
+#define BTRFS_BACKREF_SEARCH_COMMIT_ROOT ((struct btrfs_trans_handle *)0)
+
 struct inode_fs_paths {
 	struct btrfs_path		*btrfs_path;
 	struct btrfs_root		*fs_root;
@@ -44,9 +46,8 @@
 				u64 *out_root, u8 *out_level);
 
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
-				struct btrfs_path *path,
 				u64 extent_item_objectid,
-				u64 extent_offset,
+				u64 extent_offset, int search_commit_root,
 				iterate_extent_inodes_t *iterate, void *ctx);
 
 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b805afb..d286b40 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -226,8 +226,8 @@
  * Clear the writeback bits on all of the file
  * pages for a compressed write
  */
-static noinline int end_compressed_writeback(struct inode *inode, u64 start,
-					     unsigned long ram_size)
+static noinline void end_compressed_writeback(struct inode *inode, u64 start,
+					      unsigned long ram_size)
 {
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
@@ -253,7 +253,6 @@
 		index += ret;
 	}
 	/* the inode may be gone now */
-	return 0;
 }
 
 /*
@@ -392,16 +391,16 @@
 			 */
 			atomic_inc(&cb->pending_bios);
 			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			if (!skip_sum) {
 				ret = btrfs_csum_one_bio(root, inode, bio,
 							 start, 1);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			}
 
 			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			bio_put(bio);
 
@@ -421,15 +420,15 @@
 	bio_get(bio);
 
 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	if (!skip_sum) {
 		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	bio_put(bio);
 	return 0;
@@ -497,7 +496,7 @@
 		 * sure they map to this compressed extent on disk.
 		 */
 		set_page_extent_mapped(page);
-		lock_extent(tree, last_offset, end, GFP_NOFS);
+		lock_extent(tree, last_offset, end);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, last_offset,
 					   PAGE_CACHE_SIZE);
@@ -507,7 +506,7 @@
 		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
 			free_extent_map(em);
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
@@ -535,7 +534,7 @@
 			nr_pages++;
 			page_cache_release(page);
 		} else {
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
@@ -662,7 +661,7 @@
 			bio_get(comp_bio);
 
 			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			/*
 			 * inc the count before we submit the bio so
@@ -675,14 +674,14 @@
 			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 				ret = btrfs_lookup_bio_sums(root, inode,
 							comp_bio, sums);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			}
 			sums += (comp_bio->bi_size + root->sectorsize - 1) /
 				root->sectorsize;
 
 			ret = btrfs_map_bio(root, READ, comp_bio,
 					    mirror_num, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			bio_put(comp_bio);
 
@@ -698,15 +697,15 @@
 	bio_get(comp_bio);
 
 	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	bio_put(comp_bio);
 	return 0;
@@ -734,7 +733,7 @@
 	&btrfs_lzo_compress,
 };
 
-int __init btrfs_init_compress(void)
+void __init btrfs_init_compress(void)
 {
 	int i;
 
@@ -744,7 +743,6 @@
 		atomic_set(&comp_alloc_workspace[i], 0);
 		init_waitqueue_head(&comp_workspace_wait[i]);
 	}
-	return 0;
 }
 
 /*
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index a12059f..9afb0a6 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -19,7 +19,7 @@
 #ifndef __BTRFS_COMPRESSION_
 #define __BTRFS_COMPRESSION_
 
-int btrfs_init_compress(void);
+void btrfs_init_compress(void);
 void btrfs_exit_compress(void);
 
 int btrfs_compress_pages(int type, struct address_space *mapping,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0639a55..e801f22 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -36,7 +36,7 @@
 			      struct btrfs_root *root,
 			      struct extent_buffer *dst_buf,
 			      struct extent_buffer *src_buf);
-static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		   struct btrfs_path *path, int level, int slot);
 
 struct btrfs_path *btrfs_alloc_path(void)
@@ -156,10 +156,23 @@
 {
 	struct extent_buffer *eb;
 
-	rcu_read_lock();
-	eb = rcu_dereference(root->node);
-	extent_buffer_get(eb);
-	rcu_read_unlock();
+	while (1) {
+		rcu_read_lock();
+		eb = rcu_dereference(root->node);
+
+		/*
+		 * RCU really hurts here: we could free up the root node because
+		 * it was cow'ed, but we may not get the new root node yet, so do
+		 * the inc_not_zero dance; if it doesn't work, then
+		 * synchronize_rcu() and try again.
+		 */
+		if (atomic_inc_not_zero(&eb->refs)) {
+			rcu_read_unlock();
+			break;
+		}
+		rcu_read_unlock();
+		synchronize_rcu();
+	}
 	return eb;
 }
 
@@ -331,8 +344,13 @@
 	if (btrfs_block_can_be_shared(root, buf)) {
 		ret = btrfs_lookup_extent_info(trans, root, buf->start,
 					       buf->len, &refs, &flags);
-		BUG_ON(ret);
-		BUG_ON(refs == 0);
+		if (ret)
+			return ret;
+		if (refs == 0) {
+			ret = -EROFS;
+			btrfs_std_error(root->fs_info, ret);
+			return ret;
+		}
 	} else {
 		refs = 1;
 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
@@ -351,14 +369,14 @@
 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
 			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			if (root->root_key.objectid ==
 			    BTRFS_TREE_RELOC_OBJECTID) {
 				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			}
 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
 		} else {
@@ -368,14 +386,15 @@
 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 			else
 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		}
 		if (new_flags != 0) {
 			ret = btrfs_set_disk_extent_flags(trans, root,
 							  buf->start,
 							  buf->len,
 							  new_flags, 0);
-			BUG_ON(ret);
+			if (ret)
+				return ret;
 		}
 	} else {
 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
@@ -384,9 +403,9 @@
 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 			else
 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		}
 		clean_tree_block(trans, root, buf);
 		*last_ref = 1;
@@ -415,7 +434,7 @@
 {
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *cow;
-	int level;
+	int level, ret;
 	int last_ref = 0;
 	int unlock_orig = 0;
 	u64 parent_start;
@@ -467,7 +486,11 @@
 			    (unsigned long)btrfs_header_fsid(cow),
 			    BTRFS_FSID_SIZE);
 
-	update_ref_for_cow(trans, root, buf, cow, &last_ref);
+	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 
 	if (root->ref_cows)
 		btrfs_reloc_cow_block(trans, root, buf, cow);
@@ -504,7 +527,7 @@
 	}
 	if (unlock_orig)
 		btrfs_tree_unlock(buf);
-	free_extent_buffer(buf);
+	free_extent_buffer_stale(buf);
 	btrfs_mark_buffer_dirty(cow);
 	*cow_ret = cow;
 	return 0;
@@ -934,7 +957,12 @@
 
 		/* promote the child to a root */
 		child = read_node_slot(root, mid, 0);
-		BUG_ON(!child);
+		if (!child) {
+			ret = -EROFS;
+			btrfs_std_error(root->fs_info, ret);
+			goto enospc;
+		}
+
 		btrfs_tree_lock(child);
 		btrfs_set_lock_blocking(child);
 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
@@ -959,7 +987,7 @@
 		root_sub_used(root, mid->len);
 		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
 		/* once for the root ptr */
-		free_extent_buffer(mid);
+		free_extent_buffer_stale(mid);
 		return 0;
 	}
 	if (btrfs_header_nritems(mid) >
@@ -1010,13 +1038,10 @@
 		if (btrfs_header_nritems(right) == 0) {
 			clean_tree_block(trans, root, right);
 			btrfs_tree_unlock(right);
-			wret = del_ptr(trans, root, path, level + 1, pslot +
-				       1);
-			if (wret)
-				ret = wret;
+			del_ptr(trans, root, path, level + 1, pslot + 1);
 			root_sub_used(root, right->len);
 			btrfs_free_tree_block(trans, root, right, 0, 1, 0);
-			free_extent_buffer(right);
+			free_extent_buffer_stale(right);
 			right = NULL;
 		} else {
 			struct btrfs_disk_key right_key;
@@ -1035,7 +1060,11 @@
 		 * otherwise we would have pulled some pointers from the
 		 * right
 		 */
-		BUG_ON(!left);
+		if (!left) {
+			ret = -EROFS;
+			btrfs_std_error(root->fs_info, ret);
+			goto enospc;
+		}
 		wret = balance_node_right(trans, root, mid, left);
 		if (wret < 0) {
 			ret = wret;
@@ -1051,12 +1080,10 @@
 	if (btrfs_header_nritems(mid) == 0) {
 		clean_tree_block(trans, root, mid);
 		btrfs_tree_unlock(mid);
-		wret = del_ptr(trans, root, path, level + 1, pslot);
-		if (wret)
-			ret = wret;
+		del_ptr(trans, root, path, level + 1, pslot);
 		root_sub_used(root, mid->len);
 		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
-		free_extent_buffer(mid);
+		free_extent_buffer_stale(mid);
 		mid = NULL;
 	} else {
 		/* update the parent key to reflect our changes */
@@ -1382,7 +1409,8 @@
  * if lowest_unlock is 1, level 0 won't be unlocked
  */
 static noinline void unlock_up(struct btrfs_path *path, int level,
-			       int lowest_unlock)
+			       int lowest_unlock, int min_write_lock_level,
+			       int *write_lock_level)
 {
 	int i;
 	int skip_level = level;
@@ -1414,6 +1442,11 @@
 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
 			btrfs_tree_unlock_rw(t, path->locks[i]);
 			path->locks[i] = 0;
+			if (write_lock_level &&
+			    i > min_write_lock_level &&
+			    i <= *write_lock_level) {
+				*write_lock_level = i - 1;
+			}
 		}
 	}
 }
@@ -1637,6 +1670,7 @@
 	/* everything at write_lock_level or lower must be write locked */
 	int write_lock_level = 0;
 	u8 lowest_level = 0;
+	int min_write_lock_level;
 
 	lowest_level = p->lowest_level;
 	WARN_ON(lowest_level && ins_len > 0);
@@ -1664,6 +1698,8 @@
 	if (cow && (p->keep_locks || p->lowest_level))
 		write_lock_level = BTRFS_MAX_LEVEL;
 
+	min_write_lock_level = write_lock_level;
+
 again:
 	/*
 	 * we try very hard to do read locks on the root
@@ -1795,7 +1831,8 @@
 				goto again;
 			}
 
-			unlock_up(p, level, lowest_unlock);
+			unlock_up(p, level, lowest_unlock,
+				  min_write_lock_level, &write_lock_level);
 
 			if (level == lowest_level) {
 				if (dec)
@@ -1857,7 +1894,8 @@
 				}
 			}
 			if (!p->search_for_split)
-				unlock_up(p, level, lowest_unlock);
+				unlock_up(p, level, lowest_unlock,
+					  min_write_lock_level, &write_lock_level);
 			goto done;
 		}
 	}
@@ -1881,15 +1919,12 @@
  * fixing up pointers when a given leaf/node is not in slot 0 of the
  * higher levels
  *
- * If this fails to write a tree block, it returns -1, but continues
- * fixing up the blocks in ram so the tree is consistent.
  */
-static int fixup_low_keys(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root, struct btrfs_path *path,
-			  struct btrfs_disk_key *key, int level)
+static void fixup_low_keys(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root, struct btrfs_path *path,
+			   struct btrfs_disk_key *key, int level)
 {
 	int i;
-	int ret = 0;
 	struct extent_buffer *t;
 
 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
@@ -1902,7 +1937,6 @@
 		if (tslot != 0)
 			break;
 	}
-	return ret;
 }
 
 /*
@@ -1911,9 +1945,9 @@
  * This function isn't completely safe. It's the caller's responsibility
  * that the new key won't break the order
  */
-int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, struct btrfs_path *path,
-			    struct btrfs_key *new_key)
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, struct btrfs_path *path,
+			     struct btrfs_key *new_key)
 {
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *eb;
@@ -1923,13 +1957,11 @@
 	slot = path->slots[0];
 	if (slot > 0) {
 		btrfs_item_key(eb, &disk_key, slot - 1);
-		if (comp_keys(&disk_key, new_key) >= 0)
-			return -1;
+		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
 	}
 	if (slot < btrfs_header_nritems(eb) - 1) {
 		btrfs_item_key(eb, &disk_key, slot + 1);
-		if (comp_keys(&disk_key, new_key) <= 0)
-			return -1;
+		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
 	}
 
 	btrfs_cpu_key_to_disk(&disk_key, new_key);
@@ -1937,7 +1969,6 @@
 	btrfs_mark_buffer_dirty(eb);
 	if (slot == 0)
 		fixup_low_keys(trans, root, path, &disk_key, 1);
-	return 0;
 }
 
 /*
@@ -2140,12 +2171,11 @@
  *
  * slot and level indicate where you want the key to go, and
  * blocknr is the block the key points to.
- *
- * returns zero on success and < 0 on any error
  */
-static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
-		      *root, struct btrfs_path *path, struct btrfs_disk_key
-		      *key, u64 bytenr, int slot, int level)
+static void insert_ptr(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct btrfs_path *path,
+		       struct btrfs_disk_key *key, u64 bytenr,
+		       int slot, int level)
 {
 	struct extent_buffer *lower;
 	int nritems;
@@ -2155,8 +2185,7 @@
 	lower = path->nodes[level];
 	nritems = btrfs_header_nritems(lower);
 	BUG_ON(slot > nritems);
-	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
-		BUG();
+	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
 	if (slot != nritems) {
 		memmove_extent_buffer(lower,
 			      btrfs_node_key_ptr_offset(slot + 1),
@@ -2169,7 +2198,6 @@
 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
 	btrfs_set_header_nritems(lower, nritems + 1);
 	btrfs_mark_buffer_dirty(lower);
-	return 0;
 }
 
 /*
@@ -2190,7 +2218,6 @@
 	struct btrfs_disk_key disk_key;
 	int mid;
 	int ret;
-	int wret;
 	u32 c_nritems;
 
 	c = path->nodes[level];
@@ -2247,11 +2274,8 @@
 	btrfs_mark_buffer_dirty(c);
 	btrfs_mark_buffer_dirty(split);
 
-	wret = insert_ptr(trans, root, path, &disk_key, split->start,
-			  path->slots[level + 1] + 1,
-			  level + 1);
-	if (wret)
-		ret = wret;
+	insert_ptr(trans, root, path, &disk_key, split->start,
+		   path->slots[level + 1] + 1, level + 1);
 
 	if (path->slots[level] >= mid) {
 		path->slots[level] -= mid;
@@ -2320,6 +2344,7 @@
 {
 	struct extent_buffer *left = path->nodes[0];
 	struct extent_buffer *upper = path->nodes[1];
+	struct btrfs_map_token token;
 	struct btrfs_disk_key disk_key;
 	int slot;
 	u32 i;
@@ -2331,6 +2356,8 @@
 	u32 data_end;
 	u32 this_item_size;
 
+	btrfs_init_map_token(&token);
+
 	if (empty)
 		nr = 0;
 	else
@@ -2408,8 +2435,8 @@
 	push_space = BTRFS_LEAF_DATA_SIZE(root);
 	for (i = 0; i < right_nritems; i++) {
 		item = btrfs_item_nr(right, i);
-		push_space -= btrfs_item_size(right, item);
-		btrfs_set_item_offset(right, item, push_space);
+		push_space -= btrfs_token_item_size(right, item, &token);
+		btrfs_set_token_item_offset(right, item, push_space, &token);
 	}
 
 	left_nritems -= push_items;
@@ -2537,9 +2564,11 @@
 	u32 old_left_nritems;
 	u32 nr;
 	int ret = 0;
-	int wret;
 	u32 this_item_size;
 	u32 old_left_item_size;
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
 
 	if (empty)
 		nr = min(right_nritems, max_slot);
@@ -2600,9 +2629,10 @@
 
 		item = btrfs_item_nr(left, i);
 
-		ioff = btrfs_item_offset(left, item);
-		btrfs_set_item_offset(left, item,
-		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
+		ioff = btrfs_token_item_offset(left, item, &token);
+		btrfs_set_token_item_offset(left, item,
+		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
+		      &token);
 	}
 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
 
@@ -2632,8 +2662,9 @@
 	for (i = 0; i < right_nritems; i++) {
 		item = btrfs_item_nr(right, i);
 
-		push_space = push_space - btrfs_item_size(right, item);
-		btrfs_set_item_offset(right, item, push_space);
+		push_space = push_space - btrfs_token_item_size(right,
+								item, &token);
+		btrfs_set_token_item_offset(right, item, push_space, &token);
 	}
 
 	btrfs_mark_buffer_dirty(left);
@@ -2643,9 +2674,7 @@
 		clean_tree_block(trans, root, right);
 
 	btrfs_item_key(right, &disk_key, 0);
-	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
-	if (wret)
-		ret = wret;
+	fixup_low_keys(trans, root, path, &disk_key, 1);
 
 	/* then fixup the leaf pointer in the path */
 	if (path->slots[0] < push_items) {
@@ -2716,7 +2745,8 @@
 			      path->nodes[1], slot - 1, &left);
 	if (ret) {
 		/* we hit -ENOSPC, but it isn't fatal here */
-		ret = 1;
+		if (ret == -ENOSPC)
+			ret = 1;
 		goto out;
 	}
 
@@ -2738,22 +2768,21 @@
 /*
  * split the path's leaf in two, making sure there is at least data_size
  * available for the resulting leaf level of the path.
- *
- * returns 0 if all went well and < 0 on failure.
  */
-static noinline int copy_for_split(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root,
-			       struct btrfs_path *path,
-			       struct extent_buffer *l,
-			       struct extent_buffer *right,
-			       int slot, int mid, int nritems)
+static noinline void copy_for_split(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    struct btrfs_path *path,
+				    struct extent_buffer *l,
+				    struct extent_buffer *right,
+				    int slot, int mid, int nritems)
 {
 	int data_copy_size;
 	int rt_data_off;
 	int i;
-	int ret = 0;
-	int wret;
 	struct btrfs_disk_key disk_key;
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
 
 	nritems = nritems - mid;
 	btrfs_set_header_nritems(right, nritems);
@@ -2775,17 +2804,15 @@
 		struct btrfs_item *item = btrfs_item_nr(right, i);
 		u32 ioff;
 
-		ioff = btrfs_item_offset(right, item);
-		btrfs_set_item_offset(right, item, ioff + rt_data_off);
+		ioff = btrfs_token_item_offset(right, item, &token);
+		btrfs_set_token_item_offset(right, item,
+					    ioff + rt_data_off, &token);
 	}
 
 	btrfs_set_header_nritems(l, mid);
-	ret = 0;
 	btrfs_item_key(right, &disk_key, 0);
-	wret = insert_ptr(trans, root, path, &disk_key, right->start,
-			  path->slots[1] + 1, 1);
-	if (wret)
-		ret = wret;
+	insert_ptr(trans, root, path, &disk_key, right->start,
+		   path->slots[1] + 1, 1);
 
 	btrfs_mark_buffer_dirty(right);
 	btrfs_mark_buffer_dirty(l);
@@ -2803,8 +2830,6 @@
 	}
 
 	BUG_ON(path->slots[0] < 0);
-
-	return ret;
 }
 
 /*
@@ -2993,12 +3018,8 @@
 	if (split == 0) {
 		if (mid <= slot) {
 			btrfs_set_header_nritems(right, 0);
-			wret = insert_ptr(trans, root, path,
-					  &disk_key, right->start,
-					  path->slots[1] + 1, 1);
-			if (wret)
-				ret = wret;
-
+			insert_ptr(trans, root, path, &disk_key, right->start,
+				   path->slots[1] + 1, 1);
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
@@ -3006,29 +3027,21 @@
 			path->slots[1] += 1;
 		} else {
 			btrfs_set_header_nritems(right, 0);
-			wret = insert_ptr(trans, root, path,
-					  &disk_key,
-					  right->start,
+			insert_ptr(trans, root, path, &disk_key, right->start,
 					  path->slots[1], 1);
-			if (wret)
-				ret = wret;
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
 			path->slots[0] = 0;
-			if (path->slots[1] == 0) {
-				wret = fixup_low_keys(trans, root,
-						path, &disk_key, 1);
-				if (wret)
-					ret = wret;
-			}
+			if (path->slots[1] == 0)
+				fixup_low_keys(trans, root, path,
+					       &disk_key, 1);
 		}
 		btrfs_mark_buffer_dirty(right);
 		return ret;
 	}
 
-	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
-	BUG_ON(ret);
+	copy_for_split(trans, root, path, l, right, slot, mid, nritems);
 
 	if (split == 2) {
 		BUG_ON(num_doubles != 0);
@@ -3036,7 +3049,7 @@
 		goto again;
 	}
 
-	return ret;
+	return 0;
 
 push_for_double:
 	push_for_double_split(trans, root, path, data_size);
@@ -3238,11 +3251,9 @@
 		return ret;
 
 	path->slots[0]++;
-	ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
-				     item_size, item_size +
-				     sizeof(struct btrfs_item), 1);
-	BUG_ON(ret);
-
+	setup_items_for_insert(trans, root, path, new_key, &item_size,
+			       item_size, item_size +
+			       sizeof(struct btrfs_item), 1);
 	leaf = path->nodes[0];
 	memcpy_extent_buffer(leaf,
 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -3257,10 +3268,10 @@
  * off the end of the item or if we shift the item to chop bytes off
  * the front.
  */
-int btrfs_truncate_item(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
-			struct btrfs_path *path,
-			u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 struct btrfs_path *path,
+			 u32 new_size, int from_end)
 {
 	int slot;
 	struct extent_buffer *leaf;
@@ -3271,13 +3282,16 @@
 	unsigned int old_size;
 	unsigned int size_diff;
 	int i;
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
 
 	leaf = path->nodes[0];
 	slot = path->slots[0];
 
 	old_size = btrfs_item_size_nr(leaf, slot);
 	if (old_size == new_size)
-		return 0;
+		return;
 
 	nritems = btrfs_header_nritems(leaf);
 	data_end = leaf_data_end(root, leaf);
@@ -3297,8 +3311,9 @@
 		u32 ioff;
 		item = btrfs_item_nr(leaf, i);
 
-		ioff = btrfs_item_offset(leaf, item);
-		btrfs_set_item_offset(leaf, item, ioff + size_diff);
+		ioff = btrfs_token_item_offset(leaf, item, &token);
+		btrfs_set_token_item_offset(leaf, item,
+					    ioff + size_diff, &token);
 	}
 
 	/* shift the data */
@@ -3350,15 +3365,14 @@
 		btrfs_print_leaf(root, leaf);
 		BUG();
 	}
-	return 0;
 }
 
 /*
  * make the item pointed to by the path bigger, data_size is the new size.
  */
-int btrfs_extend_item(struct btrfs_trans_handle *trans,
-		      struct btrfs_root *root, struct btrfs_path *path,
-		      u32 data_size)
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct btrfs_path *path,
+		       u32 data_size)
 {
 	int slot;
 	struct extent_buffer *leaf;
@@ -3368,6 +3382,9 @@
 	unsigned int old_data;
 	unsigned int old_size;
 	int i;
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
 
 	leaf = path->nodes[0];
 
@@ -3397,8 +3414,9 @@
 		u32 ioff;
 		item = btrfs_item_nr(leaf, i);
 
-		ioff = btrfs_item_offset(leaf, item);
-		btrfs_set_item_offset(leaf, item, ioff - data_size);
+		ioff = btrfs_token_item_offset(leaf, item, &token);
+		btrfs_set_token_item_offset(leaf, item,
+					    ioff - data_size, &token);
 	}
 
 	/* shift the data */
@@ -3416,7 +3434,6 @@
 		btrfs_print_leaf(root, leaf);
 		BUG();
 	}
-	return 0;
 }
 
 /*
@@ -3441,6 +3458,9 @@
 	unsigned int data_end;
 	struct btrfs_disk_key disk_key;
 	struct btrfs_key found_key;
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
 
 	for (i = 0; i < nr; i++) {
 		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
@@ -3506,8 +3526,9 @@
 			u32 ioff;
 
 			item = btrfs_item_nr(leaf, i);
-			ioff = btrfs_item_offset(leaf, item);
-			btrfs_set_item_offset(leaf, item, ioff - total_data);
+			ioff = btrfs_token_item_offset(leaf, item, &token);
+			btrfs_set_token_item_offset(leaf, item,
+						    ioff - total_data, &token);
 		}
 		/* shift the items */
 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
@@ -3534,9 +3555,10 @@
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
 		btrfs_set_item_key(leaf, &disk_key, slot + i);
 		item = btrfs_item_nr(leaf, slot + i);
-		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
+		btrfs_set_token_item_offset(leaf, item,
+					    data_end - data_size[i], &token);
 		data_end -= data_size[i];
-		btrfs_set_item_size(leaf, item, data_size[i]);
+		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
 	}
 	btrfs_set_header_nritems(leaf, nritems + nr);
 	btrfs_mark_buffer_dirty(leaf);
@@ -3544,7 +3566,7 @@
 	ret = 0;
 	if (slot == 0) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+		fixup_low_keys(trans, root, path, &disk_key, 1);
 	}
 
 	if (btrfs_leaf_free_space(root, leaf) < 0) {
@@ -3562,19 +3584,21 @@
  * to save stack depth by doing the bulk of the work in a function
  * that doesn't call btrfs_search_slot
  */
-int setup_items_for_insert(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, struct btrfs_path *path,
-			   struct btrfs_key *cpu_key, u32 *data_size,
-			   u32 total_data, u32 total_size, int nr)
+void setup_items_for_insert(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, struct btrfs_path *path,
+			    struct btrfs_key *cpu_key, u32 *data_size,
+			    u32 total_data, u32 total_size, int nr)
 {
 	struct btrfs_item *item;
 	int i;
 	u32 nritems;
 	unsigned int data_end;
 	struct btrfs_disk_key disk_key;
-	int ret;
 	struct extent_buffer *leaf;
 	int slot;
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
 
 	leaf = path->nodes[0];
 	slot = path->slots[0];
@@ -3606,8 +3630,9 @@
 			u32 ioff;
 
 			item = btrfs_item_nr(leaf, i);
-			ioff = btrfs_item_offset(leaf, item);
-			btrfs_set_item_offset(leaf, item, ioff - total_data);
+			ioff = btrfs_token_item_offset(leaf, item, &token);
+			btrfs_set_token_item_offset(leaf, item,
+						    ioff - total_data, &token);
 		}
 		/* shift the items */
 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
@@ -3626,17 +3651,17 @@
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
 		btrfs_set_item_key(leaf, &disk_key, slot + i);
 		item = btrfs_item_nr(leaf, slot + i);
-		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
+		btrfs_set_token_item_offset(leaf, item,
+					    data_end - data_size[i], &token);
 		data_end -= data_size[i];
-		btrfs_set_item_size(leaf, item, data_size[i]);
+		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
 	}
 
 	btrfs_set_header_nritems(leaf, nritems + nr);
 
-	ret = 0;
 	if (slot == 0) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+		fixup_low_keys(trans, root, path, &disk_key, 1);
 	}
 	btrfs_unlock_up_safe(path, 1);
 	btrfs_mark_buffer_dirty(leaf);
@@ -3645,7 +3670,6 @@
 		btrfs_print_leaf(root, leaf);
 		BUG();
 	}
-	return ret;
 }
 
 /*
@@ -3672,16 +3696,14 @@
 	if (ret == 0)
 		return -EEXIST;
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	slot = path->slots[0];
 	BUG_ON(slot < 0);
 
-	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
+	setup_items_for_insert(trans, root, path, cpu_key, data_size,
 			       total_data, total_size, nr);
-
-out:
-	return ret;
+	return 0;
 }
 
 /*
@@ -3717,13 +3739,11 @@
  * the tree should have been previously balanced so the deletion does not
  * empty a node.
  */
-static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		   struct btrfs_path *path, int level, int slot)
+static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		    struct btrfs_path *path, int level, int slot)
 {
 	struct extent_buffer *parent = path->nodes[level];
 	u32 nritems;
-	int ret = 0;
-	int wret;
 
 	nritems = btrfs_header_nritems(parent);
 	if (slot != nritems - 1) {
@@ -3743,12 +3763,9 @@
 		struct btrfs_disk_key disk_key;
 
 		btrfs_node_key(parent, &disk_key, 0);
-		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
-		if (wret)
-			ret = wret;
+		fixup_low_keys(trans, root, path, &disk_key, level + 1);
 	}
 	btrfs_mark_buffer_dirty(parent);
-	return ret;
 }
 
 /*
@@ -3761,17 +3778,13 @@
  * The path must have already been setup for deleting the leaf, including
  * all the proper balancing.  path->nodes[1] must be locked.
  */
-static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
-				   struct btrfs_path *path,
-				   struct extent_buffer *leaf)
+static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    struct btrfs_path *path,
+				    struct extent_buffer *leaf)
 {
-	int ret;
-
 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
-	ret = del_ptr(trans, root, path, 1, path->slots[1]);
-	if (ret)
-		return ret;
+	del_ptr(trans, root, path, 1, path->slots[1]);
 
 	/*
 	 * btrfs_free_extent is expensive, we want to make sure we
@@ -3781,8 +3794,9 @@
 
 	root_sub_used(root, leaf->len);
 
+	extent_buffer_get(leaf);
 	btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
-	return 0;
+	free_extent_buffer_stale(leaf);
 }
 /*
  * delete the item at the leaf level in path.  If that empties
@@ -3799,6 +3813,9 @@
 	int wret;
 	int i;
 	u32 nritems;
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
 
 	leaf = path->nodes[0];
 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
@@ -3820,8 +3837,9 @@
 			u32 ioff;
 
 			item = btrfs_item_nr(leaf, i);
-			ioff = btrfs_item_offset(leaf, item);
-			btrfs_set_item_offset(leaf, item, ioff + dsize);
+			ioff = btrfs_token_item_offset(leaf, item, &token);
+			btrfs_set_token_item_offset(leaf, item,
+						    ioff + dsize, &token);
 		}
 
 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
@@ -3839,8 +3857,7 @@
 		} else {
 			btrfs_set_path_blocking(path);
 			clean_tree_block(trans, root, leaf);
-			ret = btrfs_del_leaf(trans, root, path, leaf);
-			BUG_ON(ret);
+			btrfs_del_leaf(trans, root, path, leaf);
 		}
 	} else {
 		int used = leaf_space_used(leaf, 0, nritems);
@@ -3848,10 +3865,7 @@
 			struct btrfs_disk_key disk_key;
 
 			btrfs_item_key(leaf, &disk_key, 0);
-			wret = fixup_low_keys(trans, root, path,
-					      &disk_key, 1);
-			if (wret)
-				ret = wret;
+			fixup_low_keys(trans, root, path, &disk_key, 1);
 		}
 
 		/* delete the leaf if it is mostly empty */
@@ -3879,9 +3893,9 @@
 
 			if (btrfs_header_nritems(leaf) == 0) {
 				path->slots[1] = slot;
-				ret = btrfs_del_leaf(trans, root, path, leaf);
-				BUG_ON(ret);
+				btrfs_del_leaf(trans, root, path, leaf);
 				free_extent_buffer(leaf);
+				ret = 0;
 			} else {
 				/* if we're still in the path, make sure
 				 * we're dirty.  Otherwise, one of the
@@ -4059,18 +4073,18 @@
 		path->slots[level] = slot;
 		if (level == path->lowest_level) {
 			ret = 0;
-			unlock_up(path, level, 1);
+			unlock_up(path, level, 1, 0, NULL);
 			goto out;
 		}
 		btrfs_set_path_blocking(path);
 		cur = read_node_slot(root, cur, slot);
-		BUG_ON(!cur);
+		BUG_ON(!cur); /* -ENOMEM */
 
 		btrfs_tree_read_lock(cur);
 
 		path->locks[level - 1] = BTRFS_READ_LOCK;
 		path->nodes[level - 1] = cur;
-		unlock_up(path, level, 1);
+		unlock_up(path, level, 1, 0, NULL);
 		btrfs_clear_path_blocking(path, NULL, 0);
 	}
 out:
@@ -4306,7 +4320,7 @@
 	}
 	ret = 0;
 done:
-	unlock_up(path, 0, 1);
+	unlock_up(path, 0, 1, 0, NULL);
 	path->leave_spinning = old_spinning;
 	if (!old_spinning)
 		btrfs_set_path_blocking(path);
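
The hot loops above that shift item offsets now go through btrfs_token_item_offset()/btrfs_set_token_item_offset(), passing a btrfs_map_token so consecutive accessors can reuse the page mapping of the extent buffer instead of re-deriving it for every item. Below is a minimal userspace sketch of that caching idea; the struct and helper names imitate the kernel ones, but the buffer layout, page handling and types are invented for illustration, and the real helpers also cope with values that straddle a page boundary, which this sketch ignores.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096
#define TOY_NR_PAGES  4

/* toy "extent buffer": a handful of separately allocated pages */
struct toy_eb {
	uint8_t pages[TOY_NR_PAGES][TOY_PAGE_SIZE];
};

/* the token caches the last mapped page so neighbouring accesses skip the lookup */
struct toy_map_token {
	struct toy_eb *eb;
	uint8_t *kaddr;		/* base address of the cached page */
	unsigned long page_off;	/* buffer offset where that page starts */
};

static void toy_init_map_token(struct toy_map_token *tok)
{
	memset(tok, 0, sizeof(*tok));
}

/* return a pointer to @off inside the buffer, reusing the cached page when possible */
static uint8_t *toy_map(struct toy_eb *eb, struct toy_map_token *tok,
			unsigned long off)
{
	unsigned long page_off = off & ~((unsigned long)TOY_PAGE_SIZE - 1);

	if (tok->eb != eb || !tok->kaddr || tok->page_off != page_off) {
		tok->eb = eb;
		tok->page_off = page_off;
		tok->kaddr = eb->pages[page_off / TOY_PAGE_SIZE];
	}
	return tok->kaddr + (off - page_off);
}

static uint32_t toy_get_u32(struct toy_eb *eb, struct toy_map_token *tok,
			    unsigned long off)
{
	uint32_t v;

	memcpy(&v, toy_map(eb, tok, off), sizeof(v));
	return v;
}

static void toy_set_u32(struct toy_eb *eb, struct toy_map_token *tok,
			unsigned long off, uint32_t val)
{
	memcpy(toy_map(eb, tok, off), &val, sizeof(val));
}

int main(void)
{
	struct toy_eb eb = { { { 0 } } };
	struct toy_map_token tok;
	unsigned long off;

	toy_init_map_token(&tok);

	/* a run of adjacent 4-byte slots costs a single page lookup, not one per access */
	for (off = 0; off < 64; off += 4)
		toy_set_u32(&eb, &tok, off, (uint32_t)off * 2);

	for (off = 0; off < 64; off += 4)
		printf("%lu -> %u\n", off, toy_get_u32(&eb, &tok, off));

	return 0;
}
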
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80b6486..5b8ef8e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -48,6 +48,8 @@
 
 #define BTRFS_MAGIC "_BHRfS_M"
 
+#define BTRFS_MAX_MIRRORS 2
+
 #define BTRFS_MAX_LEVEL 8
 
 #define BTRFS_COMPAT_EXTENT_TREE_V0
@@ -138,6 +140,12 @@
 #define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2
 
 /*
+ * the max metadata block size.  This limit is somewhat artificial,
+ * but the memmove costs go through the roof for larger blocks.
+ */
+#define BTRFS_MAX_METADATA_BLOCKSIZE 65536
+
+/*
  * we can actually store much bigger names, but lets not confuse the rest
  * of linux
  */
@@ -461,6 +469,19 @@
 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(1ULL << 1)
 #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS	(1ULL << 2)
 #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO	(1ULL << 3)
+/*
+ * some patches floated around with a second compression method,
+ * so let's save that incompat here for when they do get in.
+ * Note we don't actually support it, we're just reserving the
+ * number.
+ */
+#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2	(1ULL << 4)
+
+/*
+ * older kernels tried to do bigger metadata blocks, but the
+ * code was pretty buggy.  Let's not let them try anymore.
+ */
+#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA	(1ULL << 5)
 
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
@@ -468,6 +489,7 @@
 	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
 	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
 	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
+	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
 	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO)
 
 /*
@@ -829,6 +851,21 @@
  */
 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE	(1ULL << 48)
 
+#define BTRFS_EXTENDED_PROFILE_MASK	(BTRFS_BLOCK_GROUP_PROFILE_MASK | \
+					 BTRFS_AVAIL_ALLOC_BIT_SINGLE)
+
+static inline u64 chunk_to_extended(u64 flags)
+{
+	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
+		flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+	return flags;
+}
+static inline u64 extended_to_chunk(u64 flags)
+{
+	return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+}
+
 struct btrfs_block_group_item {
 	__le64 used;
 	__le64 chunk_objectid;
@@ -1503,6 +1540,7 @@
 #define BTRFS_MOUNT_SKIP_BALANCE	(1 << 19)
 #define BTRFS_MOUNT_CHECK_INTEGRITY	(1 << 20)
 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
+#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	(1 << 22)
 
 #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
@@ -1526,6 +1564,17 @@
 
 #define BTRFS_INODE_ROOT_ITEM_INIT	(1 << 31)
 
+struct btrfs_map_token {
+	struct extent_buffer *eb;
+	char *kaddr;
+	unsigned long offset;
+};
+
+static inline void btrfs_init_map_token(struct btrfs_map_token *token)
+{
+	memset(token, 0, sizeof(*token));
+}
+
 /* some macros to generate set/get funcs for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
@@ -1549,20 +1598,22 @@
 #ifndef BTRFS_SETGET_FUNCS
 #define BTRFS_SETGET_FUNCS(name, type, member, bits)			\
 u##bits btrfs_##name(struct extent_buffer *eb, type *s);		\
+u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, struct btrfs_map_token *token);		\
+void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token);\
 void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
 #endif
 
 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits)		\
 static inline u##bits btrfs_##name(struct extent_buffer *eb)		\
 {									\
-	type *p = page_address(eb->first_page);				\
+	type *p = page_address(eb->pages[0]);				\
 	u##bits res = le##bits##_to_cpu(p->member);			\
 	return res;							\
 }									\
 static inline void btrfs_set_##name(struct extent_buffer *eb,		\
 				    u##bits val)			\
 {									\
-	type *p = page_address(eb->first_page);				\
+	type *p = page_address(eb->pages[0]);				\
 	p->member = cpu_to_le##bits(val);				\
 }
 
@@ -2466,8 +2517,7 @@
 				  struct btrfs_root *root,
 				  u64 num_bytes, u64 min_alloc_size,
 				  u64 empty_size, u64 hint_byte,
-				  u64 search_end, struct btrfs_key *ins,
-				  u64 data);
+				  struct btrfs_key *ins, u64 data);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		  struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2484,8 +2534,8 @@
 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
 				       u64 start, u64 len);
-int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root);
+void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root);
 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root);
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
@@ -2548,8 +2598,8 @@
 			     u64 num_bytes);
 int btrfs_set_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache);
-int btrfs_set_block_group_rw(struct btrfs_root *root,
-			     struct btrfs_block_group_cache *cache);
+void btrfs_set_block_group_rw(struct btrfs_root *root,
+			      struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
 int btrfs_error_unpin_extent_range(struct btrfs_root *root,
@@ -2568,9 +2618,9 @@
 int btrfs_previous_item(struct btrfs_root *root,
 			struct btrfs_path *path, u64 min_objectid,
 			int type);
-int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, struct btrfs_path *path,
-			    struct btrfs_key *new_key);
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, struct btrfs_path *path,
+			     struct btrfs_key *new_key);
 struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -2590,12 +2640,13 @@
 		      struct extent_buffer **cow_ret, u64 new_root_objectid);
 int btrfs_block_can_be_shared(struct btrfs_root *root,
 			      struct extent_buffer *buf);
-int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root
-		      *root, struct btrfs_path *path, u32 data_size);
-int btrfs_truncate_item(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
-			struct btrfs_path *path,
-			u32 new_size, int from_end);
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct btrfs_path *path,
+		       u32 data_size);
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 struct btrfs_path *path,
+			 u32 new_size, int from_end);
 int btrfs_split_item(struct btrfs_trans_handle *trans,
 		     struct btrfs_root *root,
 		     struct btrfs_path *path,
@@ -2629,10 +2680,10 @@
 	return btrfs_del_items(trans, root, path, path->slots[0], 1);
 }
 
-int setup_items_for_insert(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, struct btrfs_path *path,
-			   struct btrfs_key *cpu_key, u32 *data_size,
-			   u32 total_data, u32 total_size, int nr);
+void setup_items_for_insert(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, struct btrfs_path *path,
+			    struct btrfs_key *cpu_key, u32 *data_size,
+			    u32 total_data, u32 total_size, int nr);
 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *key, void *data, u32 data_size);
 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
@@ -2659,9 +2710,9 @@
 }
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-void btrfs_drop_snapshot(struct btrfs_root *root,
-			 struct btrfs_block_rsv *block_rsv, int update_ref,
-			 int for_reloc);
+int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
+				     struct btrfs_block_rsv *block_rsv,
+				     int update_ref, int for_reloc);
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root,
 			struct extent_buffer *node,
@@ -2687,24 +2738,6 @@
 	kfree(fs_info->super_for_commit);
 	kfree(fs_info);
 }
-/**
- * profile_is_valid - tests whether a given profile is valid and reduced
- * @flags: profile to validate
- * @extended: if true @flags is treated as an extended profile
- */
-static inline int profile_is_valid(u64 flags, int extended)
-{
-	u64 mask = ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
-	if (extended)
-		mask &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
-	if (flags & mask)
-		return 0;
-	/* true if zero or exactly one bit set */
-	return (flags & (~flags + 1)) == flags;
-}
 
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -2723,9 +2756,10 @@
 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *key, struct btrfs_root_item
 		      *item);
-int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
-		      *root, struct btrfs_key *key, struct btrfs_root_item
-		      *item);
+int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root,
+				   struct btrfs_key *key,
+				   struct btrfs_root_item *item);
 int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
 			 btrfs_root_item *item, struct btrfs_key *key);
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
@@ -2909,7 +2943,7 @@
 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root);
 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
-int btrfs_invalidate_inodes(struct btrfs_root *root);
+void btrfs_invalidate_inodes(struct btrfs_root *root);
 void btrfs_add_delayed_iput(struct inode *inode);
 void btrfs_run_delayed_iputs(struct btrfs_root *root);
 int btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -2961,13 +2995,41 @@
 /* super.c */
 int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
+void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);
 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
-		     unsigned int line, int errno);
+		     unsigned int line, int errno, const char *fmt, ...);
+
+void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root, const char *function,
+			       unsigned int line, int errno);
+
+#define btrfs_abort_transaction(trans, root, errno)		\
+do {								\
+	__btrfs_abort_transaction(trans, root, __func__,	\
+				  __LINE__, errno);		\
+} while (0)
 
 #define btrfs_std_error(fs_info, errno)				\
 do {								\
 	if ((errno))						\
-		__btrfs_std_error((fs_info), __func__, __LINE__, (errno));\
+		__btrfs_std_error((fs_info), __func__,		\
+				   __LINE__, (errno), NULL);	\
+} while (0)
+
+#define btrfs_error(fs_info, errno, fmt, args...)		\
+do {								\
+	__btrfs_std_error((fs_info), __func__, __LINE__,	\
+			  (errno), fmt, ##args);		\
+} while (0)
+
+void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+		   unsigned int line, int errno, const char *fmt, ...);
+
+#define btrfs_panic(fs_info, errno, fmt, args...)			\
+do {									\
+	struct btrfs_fs_info *_i = (fs_info);				\
+	__btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args);	\
+	BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR));	\
 } while (0)
 
 /* acl.c */
@@ -3003,16 +3065,17 @@
 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
 			      struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve);
-void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
+int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
 			      struct btrfs_pending_snapshot *pending);
 
 /* scrub.c */
 int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
 		    struct btrfs_scrub_progress *progress, int readonly);
-int btrfs_scrub_pause(struct btrfs_root *root);
-int btrfs_scrub_pause_super(struct btrfs_root *root);
-int btrfs_scrub_continue(struct btrfs_root *root);
-int btrfs_scrub_continue_super(struct btrfs_root *root);
+void btrfs_scrub_pause(struct btrfs_root *root);
+void btrfs_scrub_pause_super(struct btrfs_root *root);
+void btrfs_scrub_continue(struct btrfs_root *root);
+void btrfs_scrub_continue_super(struct btrfs_root *root);
+int __btrfs_scrub_cancel(struct btrfs_fs_info *info);
 int btrfs_scrub_cancel(struct btrfs_root *root);
 int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
 int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
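
Several prototypes in this header gain __must_check (btrfs_drop_snapshot, btrfs_update_root), which in the kernel expands to the compiler's warn_unused_result attribute so silently ignored errors show up at build time. A tiny standalone illustration of the effect follows, assuming a gcc- or clang-compatible compiler; the function and error values are made up for the example:

#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

/* pretend this can fail, the way btrfs_update_root() can */
static __must_check int update_root(int key)
{
	return key < 0 ? -22 : 0;	/* -EINVAL-style failure */
}

int main(void)
{
	int ret;

	ret = update_root(1);		/* fine: the return value is checked */
	if (ret)
		fprintf(stderr, "update failed: %d\n", ret);

	update_root(-1);		/* -Wunused-result: the compiler warns here */

	return ret;
}
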
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index fe4cd0f..03e3748 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -115,6 +115,7 @@
 	return NULL;
 }
 
+/* Will return either the node or PTR_ERR(-ENOMEM) */
 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 							struct inode *inode)
 {
@@ -836,10 +837,8 @@
 	btrfs_clear_path_blocking(path, NULL, 0);
 
 	/* insert the keys of the items */
-	ret = setup_items_for_insert(trans, root, path, keys, data_size,
-				     total_data_size, total_size, nitems);
-	if (ret)
-		goto error;
+	setup_items_for_insert(trans, root, path, keys, data_size,
+			       total_data_size, total_size, nitems);
 
 	/* insert the dir index items */
 	slot = path->slots[0];
@@ -1108,16 +1107,25 @@
 	return 0;
 }
 
-/* Called when committing the transaction. */
+/*
+ * Called when committing the transaction.
+ * Returns 0 on success.
+ * Returns < 0 on error; the transaction is aborted and any outstanding
+ * delayed items are cleaned up.
+ */
 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 			    struct btrfs_root *root)
 {
+	struct btrfs_root *curr_root = root;
 	struct btrfs_delayed_root *delayed_root;
 	struct btrfs_delayed_node *curr_node, *prev_node;
 	struct btrfs_path *path;
 	struct btrfs_block_rsv *block_rsv;
 	int ret = 0;
 
+	if (trans->aborted)
+		return -EIO;
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -1130,17 +1138,18 @@
 
 	curr_node = btrfs_first_delayed_node(delayed_root);
 	while (curr_node) {
-		root = curr_node->root;
-		ret = btrfs_insert_delayed_items(trans, path, root,
+		curr_root = curr_node->root;
+		ret = btrfs_insert_delayed_items(trans, path, curr_root,
 						 curr_node);
 		if (!ret)
-			ret = btrfs_delete_delayed_items(trans, path, root,
-							 curr_node);
+			ret = btrfs_delete_delayed_items(trans, path,
+						curr_root, curr_node);
 		if (!ret)
-			ret = btrfs_update_delayed_inode(trans, root, path,
-							 curr_node);
+			ret = btrfs_update_delayed_inode(trans, curr_root,
+						path, curr_node);
 		if (ret) {
 			btrfs_release_delayed_node(curr_node);
+			btrfs_abort_transaction(trans, root, ret);
 			break;
 		}
 
@@ -1151,6 +1160,7 @@
 
 	btrfs_free_path(path);
 	trans->block_rsv = block_rsv;
+
 	return ret;
 }
 
@@ -1371,6 +1381,7 @@
 	btrfs_wq_run_delayed_node(delayed_root, root, 0);
 }
 
+/* Will return 0 or -ENOMEM */
 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root, const char *name,
 				   int name_len, struct inode *dir,
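
btrfs_run_delayed_items() above now bails out early when the transaction is already aborted and, on the first failure while walking the delayed nodes, calls btrfs_abort_transaction() and stops instead of hitting a BUG_ON(). The sketch below models that "abort once, make the error sticky, let every caller see it" pattern in plain userspace C; every name is invented and the error values are only stand-ins:

#include <stdio.h>

struct toy_trans {
	int aborted;			/* sticky error, like trans->aborted */
};

static void toy_abort_transaction(struct toy_trans *t, int err)
{
	/* the first failure wins; later work only sees the flag */
	if (!t->aborted)
		t->aborted = err;
	fprintf(stderr, "transaction aborted: %d\n", err);
}

/* pretend node 3 hits an I/O error */
static int process_node(struct toy_trans *t, int node)
{
	(void)t;
	return node == 3 ? -5 : 0;	/* -EIO-style failure */
}

static int run_delayed_items(struct toy_trans *t, const int *nodes, int n)
{
	int i, ret = 0;

	if (t->aborted)			/* like the early trans->aborted check */
		return -5;

	for (i = 0; i < n; i++) {
		ret = process_node(t, nodes[i]);
		if (ret) {
			toy_abort_transaction(t, ret);
			break;		/* stop walking; the caller sees ret */
		}
	}
	return ret;
}

int main(void)
{
	struct toy_trans t = { 0 };
	int nodes[] = { 1, 2, 3, 4 };

	printf("first run:  %d\n", run_delayed_items(&t, nodes, 4));
	printf("second run: %d\n", run_delayed_items(&t, nodes, 4));
	return 0;
}
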
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 66e4f29..69f22e3 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -420,7 +420,7 @@
  * this does all the dirty work in terms of maintaining the correct
  * overall modification count.
  */
-static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 					struct btrfs_trans_handle *trans,
 					struct btrfs_delayed_ref_node *ref,
 					u64 bytenr, u64 num_bytes,
@@ -487,20 +487,19 @@
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(ref);
+		kfree(head_ref);
 	} else {
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
 		delayed_refs->num_entries++;
 		trans->delayed_ref_updates++;
 	}
-	return 0;
 }
 
 /*
  * helper to insert a delayed tree ref into the rbtree.
  */
-static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 					 struct btrfs_trans_handle *trans,
 					 struct btrfs_delayed_ref_node *ref,
 					 u64 bytenr, u64 num_bytes, u64 parent,
@@ -549,18 +548,17 @@
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(ref);
+		kfree(full_ref);
 	} else {
 		delayed_refs->num_entries++;
 		trans->delayed_ref_updates++;
 	}
-	return 0;
 }
 
 /*
  * helper to insert a delayed data ref into the rbtree.
  */
-static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 					 struct btrfs_trans_handle *trans,
 					 struct btrfs_delayed_ref_node *ref,
 					 u64 bytenr, u64 num_bytes, u64 parent,
@@ -611,12 +609,11 @@
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
-		kfree(ref);
+		kfree(full_ref);
 	} else {
 		delayed_refs->num_entries++;
 		trans->delayed_ref_updates++;
 	}
-	return 0;
 }
 
 /*
@@ -634,7 +631,6 @@
 	struct btrfs_delayed_tree_ref *ref;
 	struct btrfs_delayed_ref_head *head_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	int ret;
 
 	BUG_ON(extent_op && extent_op->is_data);
 	ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -656,14 +652,12 @@
 	 * insert both the head node and the new ref without dropping
 	 * the spin lock
 	 */
-	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
 				   num_bytes, action, 0);
-	BUG_ON(ret);
 
-	ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
 				   num_bytes, parent, ref_root, level, action,
 				   for_cow);
-	BUG_ON(ret);
 	if (!need_ref_seq(for_cow, ref_root) &&
 	    waitqueue_active(&delayed_refs->seq_wait))
 		wake_up(&delayed_refs->seq_wait);
@@ -685,7 +679,6 @@
 	struct btrfs_delayed_data_ref *ref;
 	struct btrfs_delayed_ref_head *head_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	int ret;
 
 	BUG_ON(extent_op && !extent_op->is_data);
 	ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -707,14 +700,12 @@
 	 * insert both the head node and the new ref without dropping
 	 * the spin lock
 	 */
-	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
 				   num_bytes, action, 1);
-	BUG_ON(ret);
 
-	ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
 				   num_bytes, parent, ref_root, owner, offset,
 				   action, for_cow);
-	BUG_ON(ret);
 	if (!need_ref_seq(for_cow, ref_root) &&
 	    waitqueue_active(&delayed_refs->seq_wait))
 		wake_up(&delayed_refs->seq_wait);
@@ -729,7 +720,6 @@
 {
 	struct btrfs_delayed_ref_head *head_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	int ret;
 
 	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
 	if (!head_ref)
@@ -740,10 +730,9 @@
 	delayed_refs = &trans->transaction->delayed_refs;
 	spin_lock(&delayed_refs->lock);
 
-	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
 				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
 				   extent_op->is_data);
-	BUG_ON(ret);
 
 	if (waitqueue_active(&delayed_refs->seq_wait))
 		wake_up(&delayed_refs->seq_wait);
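
The kfree() changes above matter because add_delayed_ref_head(), add_delayed_tree_ref() and add_delayed_data_ref() may find an equivalent entry already in the rbtree; in that case it is the freshly allocated head_ref/full_ref that has to be freed, which the new code makes explicit. Below is a minimal sketch of that insert-or-merge ownership rule; the linked list, types and names are invented stand-ins for the kernel's rbtree of delayed refs:

#include <stdio.h>
#include <stdlib.h>

struct ref {
	int bytenr;
	int count;
	struct ref *next;	/* stand-in for the rbtree linkage */
};

static struct ref *head;

/*
 * Insert @newref, or fold it into an existing entry with the same bytenr.
 * Returns the entry that actually lives in the list, so the caller knows
 * whether its allocation was consumed or must be freed.
 */
static struct ref *add_ref(struct ref *newref)
{
	struct ref *cur;

	for (cur = head; cur; cur = cur->next) {
		if (cur->bytenr == newref->bytenr) {
			cur->count += newref->count;	/* merged */
			return cur;
		}
	}
	newref->next = head;
	head = newref;
	return newref;
}

int main(void)
{
	int bytenrs[] = { 4096, 8192, 4096 };
	size_t i;

	for (i = 0; i < sizeof(bytenrs) / sizeof(bytenrs[0]); i++) {
		struct ref *r = calloc(1, sizeof(*r));
		struct ref *existing;

		if (!r)
			return 1;
		r->bytenr = bytenrs[i];
		r->count = 1;

		existing = add_ref(r);
		if (existing != r)
			free(r);	/* merged: free *our* copy, not the one in the list */
	}

	for (struct ref *cur = head; cur; cur = cur->next)
		printf("bytenr %d count %d\n", cur->bytenr, cur->count);

	while (head) {			/* tear down the list */
		struct ref *next = head->next;
		free(head);
		head = next;
	}
	return 0;
}
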
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 31d84e7..c1a074d 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -49,9 +49,8 @@
 		di = btrfs_match_dir_item_name(root, path, name, name_len);
 		if (di)
 			return ERR_PTR(-EEXIST);
-		ret = btrfs_extend_item(trans, root, path, data_size);
-	}
-	if (ret < 0)
+		btrfs_extend_item(trans, root, path, data_size);
+	} else if (ret < 0)
 		return ERR_PTR(ret);
 	WARN_ON(ret > 0);
 	leaf = path->nodes[0];
@@ -116,6 +115,7 @@
  * 'location' is the key to stuff into the directory item, 'type' is the
  * type of the inode we're pointing to, and 'index' is the sequence number
  * to use for the second index (if one is created).
+ * Will return 0 or -ENOMEM
  */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
 			  *root, const char *name, int name_len,
@@ -383,8 +383,8 @@
 		start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 		memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 			item_len - (ptr + sub_item_len - start));
-		ret = btrfs_truncate_item(trans, root, path,
-					  item_len - sub_item_len, 1);
+		btrfs_truncate_item(trans, root, path,
+				    item_len - sub_item_len, 1);
 	}
 	return ret;
 }
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 534266f..20196f4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -48,20 +48,19 @@
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
-static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 				    int read_only);
-static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
-static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
+static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 				      struct btrfs_root *root);
-static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
-static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
+static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
+static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 					struct extent_io_tree *dirty_pages,
 					int mark);
 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
 				       struct extent_io_tree *pinned_extents);
-static int btrfs_cleanup_transaction(struct btrfs_root *root);
 
 /*
  * end_io_wq structs are used to do processing in task context when an IO is
@@ -99,6 +98,7 @@
 	 */
 	u64 bio_offset;
 	struct btrfs_work work;
+	int error;
 };
 
 /*
@@ -332,8 +332,8 @@
 		return 0;
 
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
-			 0, &cached_state, GFP_NOFS);
-	if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
+			 0, &cached_state);
+	if (extent_buffer_uptodate(eb) &&
 	    btrfs_header_generation(eb) == parent_transid) {
 		ret = 0;
 		goto out;
@@ -344,7 +344,7 @@
 		       (unsigned long long)parent_transid,
 		       (unsigned long long)btrfs_header_generation(eb));
 	ret = 1;
-	clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
+	clear_extent_buffer_uptodate(eb);
 out:
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 			     &cached_state, GFP_NOFS);
@@ -360,9 +360,11 @@
 					  u64 start, u64 parent_transid)
 {
 	struct extent_io_tree *io_tree;
+	int failed = 0;
 	int ret;
 	int num_copies = 0;
 	int mirror_num = 0;
+	int failed_mirror = 0;
 
 	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
@@ -370,9 +372,8 @@
 		ret = read_extent_buffer_pages(io_tree, eb, start,
 					       WAIT_COMPLETE,
 					       btree_get_extent, mirror_num);
-		if (!ret &&
-		    !verify_parent_transid(io_tree, eb, parent_transid))
-			return ret;
+		if (!ret && !verify_parent_transid(io_tree, eb, parent_transid))
+			break;
 
 		/*
 		 * This buffer's crc is fine, but its contents are corrupted, so
@@ -380,18 +381,31 @@
 		 * any less wrong.
 		 */
 		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
-			return ret;
+			break;
+
+		if (!failed_mirror) {
+			failed = 1;
+			printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror);
+			failed_mirror = eb->failed_mirror;
+		}
 
 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 					      eb->start, eb->len);
 		if (num_copies == 1)
-			return ret;
+			break;
 
 		mirror_num++;
+		if (mirror_num == failed_mirror)
+			mirror_num++;
+
 		if (mirror_num > num_copies)
-			return ret;
+			break;
 	}
-	return -EIO;
+
+	if (failed && !ret)
+		repair_eb_io_failure(root, eb, failed_mirror);
+
+	return ret;
 }
 
 /*
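
btree_read_extent_buffer_pages() above now remembers which mirror produced the bad copy, keeps retrying the read against the remaining mirrors while skipping that one, and finally rewrites the failed mirror from a good copy via repair_eb_io_failure(). The loop below is a userspace sketch of just the retry/skip logic; mirror numbering is simplified (in the kernel, mirror 0 means "any copy" and the failed mirror is reported by the completion path), and all names and error values are stand-ins:

#include <stdio.h>

/* pretend mirror 1 holds a corrupt copy and mirror 2 is good */
static int read_from_mirror(int mirror_num)
{
	return mirror_num == 1 ? -5 : 0;	/* -EIO-style failure */
}

/*
 * Try a metadata read against each mirror in turn: remember the first
 * mirror that failed, never retry it, and once a good copy is found
 * report that the bad mirror should be repaired from it.
 */
static int read_with_retries(int num_copies)
{
	int mirror_num = 1;
	int failed_mirror = 0;
	int failed = 0;
	int ret;

	while (1) {
		ret = read_from_mirror(mirror_num);
		if (!ret)
			break;			/* found a good copy */

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = mirror_num;
		}

		if (num_copies == 1)
			break;			/* nothing else to try */

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;		/* skip the known-bad mirror */

		if (mirror_num > num_copies)
			break;			/* ran out of mirrors */
	}

	if (failed && !ret)
		printf("repairing mirror %d from the good copy\n",
		       failed_mirror);
	return ret;
}

int main(void)
{
	printf("two copies -> %d\n", read_with_retries(2));
	printf("one copy   -> %d\n", read_with_retries(1));
	return 0;
}
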
@@ -404,50 +418,27 @@
 	struct extent_io_tree *tree;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 found_start;
-	unsigned long len;
 	struct extent_buffer *eb;
-	int ret;
 
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-	if (page->private == EXTENT_PAGE_PRIVATE) {
-		WARN_ON(1);
-		goto out;
-	}
-	if (!page->private) {
-		WARN_ON(1);
-		goto out;
-	}
-	len = page->private >> 2;
-	WARN_ON(len == 0);
-
-	eb = alloc_extent_buffer(tree, start, len, page);
-	if (eb == NULL) {
-		WARN_ON(1);
-		goto out;
-	}
-	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
-					     btrfs_header_generation(eb));
-	BUG_ON(ret);
-	WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
-
+	eb = (struct extent_buffer *)page->private;
+	if (page != eb->pages[0])
+		return 0;
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
 		WARN_ON(1);
-		goto err;
+		return 0;
 	}
-	if (eb->first_page != page) {
+	if (eb->pages[0] != page) {
 		WARN_ON(1);
-		goto err;
+		return 0;
 	}
 	if (!PageUptodate(page)) {
 		WARN_ON(1);
-		goto err;
+		return 0;
 	}
 	csum_tree_block(root, eb, 0);
-err:
-	free_extent_buffer(eb);
-out:
 	return 0;
 }
 
@@ -537,34 +528,74 @@
 	return 0;
 }
 
+struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
+				       struct page *page, int max_walk)
+{
+	struct extent_buffer *eb;
+	u64 start = page_offset(page);
+	u64 target = start;
+	u64 min_start;
+
+	if (start < max_walk)
+		min_start = 0;
+	else
+		min_start = start - max_walk;
+
+	while (start >= min_start) {
+		eb = find_extent_buffer(tree, start, 0);
+		if (eb) {
+			/*
+			 * we found an extent buffer and it contains our page
+			 * hooray!
+			 */
+			if (eb->start <= target &&
+			    eb->start + eb->len > target)
+				return eb;
+
+			/* we found an extent buffer that wasn't for us */
+			free_extent_buffer(eb);
+			return NULL;
+		}
+		if (start == 0)
+			break;
+		start -= PAGE_CACHE_SIZE;
+	}
+	return NULL;
+}
+
 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 			       struct extent_state *state)
 {
 	struct extent_io_tree *tree;
 	u64 found_start;
 	int found_level;
-	unsigned long len;
 	struct extent_buffer *eb;
 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 	int ret = 0;
+	int reads_done;
 
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	if (page->private == EXTENT_PAGE_PRIVATE)
-		goto out;
 	if (!page->private)
 		goto out;
 
-	len = page->private >> 2;
-	WARN_ON(len == 0);
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	eb = (struct extent_buffer *)page->private;
 
-	eb = alloc_extent_buffer(tree, start, len, page);
-	if (eb == NULL) {
+	/* the pending IO might have been the only thing that kept this buffer
+	 * in memory.  Make sure we have a ref for all these other checks
+	 */
+	extent_buffer_get(eb);
+
+	reads_done = atomic_dec_and_test(&eb->io_pages);
+	if (!reads_done)
+		goto err;
+
+	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
 		ret = -EIO;
-		goto out;
+		goto err;
 	}
 
 	found_start = btrfs_header_bytenr(eb);
-	if (found_start != start) {
+	if (found_start != eb->start) {
 		printk_ratelimited(KERN_INFO "btrfs bad tree block start "
 			       "%llu %llu\n",
 			       (unsigned long long)found_start,
@@ -572,13 +603,6 @@
 		ret = -EIO;
 		goto err;
 	}
-	if (eb->first_page != page) {
-		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
-		       eb->first_page->index, page->index);
-		WARN_ON(1);
-		ret = -EIO;
-		goto err;
-	}
 	if (check_tree_block_fsid(root, eb)) {
 		printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
 			       (unsigned long long)eb->start);
@@ -606,48 +630,31 @@
 		ret = -EIO;
 	}
 
-	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
-	end = eb->start + end - 1;
+	if (!ret)
+		set_extent_buffer_uptodate(eb);
 err:
 	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
 		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
 		btree_readahead_hook(root, eb, eb->start, ret);
 	}
 
+	if (ret)
+		clear_extent_buffer_uptodate(eb);
 	free_extent_buffer(eb);
 out:
 	return ret;
 }
 
-static int btree_io_failed_hook(struct bio *failed_bio,
-			 struct page *page, u64 start, u64 end,
-			 int mirror_num, struct extent_state *state)
+static int btree_io_failed_hook(struct page *page, int failed_mirror)
 {
-	struct extent_io_tree *tree;
-	unsigned long len;
 	struct extent_buffer *eb;
 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	if (page->private == EXTENT_PAGE_PRIVATE)
-		goto out;
-	if (!page->private)
-		goto out;
-
-	len = page->private >> 2;
-	WARN_ON(len == 0);
-
-	eb = alloc_extent_buffer(tree, start, len, page);
-	if (eb == NULL)
-		goto out;
-
-	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
-		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+	eb = (struct extent_buffer *)page->private;
+	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+	eb->failed_mirror = failed_mirror;
+	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 		btree_readahead_hook(root, eb, eb->start, -EIO);
-	}
-	free_extent_buffer(eb);
-
-out:
 	return -EIO;	/* we fixed nothing */
 }
 
@@ -719,11 +726,14 @@
 static void run_one_async_start(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
+	int ret;
 
 	async = container_of(work, struct  async_submit_bio, work);
-	async->submit_bio_start(async->inode, async->rw, async->bio,
-			       async->mirror_num, async->bio_flags,
-			       async->bio_offset);
+	ret = async->submit_bio_start(async->inode, async->rw, async->bio,
+				      async->mirror_num, async->bio_flags,
+				      async->bio_offset);
+	if (ret)
+		async->error = ret;
 }
 
 static void run_one_async_done(struct btrfs_work *work)
@@ -744,6 +754,12 @@
 	    waitqueue_active(&fs_info->async_submit_wait))
 		wake_up(&fs_info->async_submit_wait);
 
+	/* If an error occurred we just want to clean up the bio and move on */
+	if (async->error) {
+		bio_endio(async->bio, async->error);
+		return;
+	}
+
 	async->submit_bio_done(async->inode, async->rw, async->bio,
 			       async->mirror_num, async->bio_flags,
 			       async->bio_offset);
@@ -785,6 +801,8 @@
 	async->bio_flags = bio_flags;
 	async->bio_offset = bio_offset;
 
+	async->error = 0;
+
 	atomic_inc(&fs_info->nr_async_submits);
 
 	if (rw & REQ_SYNC)
@@ -806,15 +824,18 @@
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int bio_index = 0;
 	struct btrfs_root *root;
+	int ret = 0;
 
 	WARN_ON(bio->bi_vcnt <= 0);
 	while (bio_index < bio->bi_vcnt) {
 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
-		csum_dirty_buffer(root, bvec->bv_page);
+		ret = csum_dirty_buffer(root, bvec->bv_page);
+		if (ret)
+			break;
 		bio_index++;
 		bvec++;
 	}
-	return 0;
+	return ret;
 }
 
 static int __btree_submit_bio_start(struct inode *inode, int rw,
@@ -826,8 +847,7 @@
 	 * when we're called for a write, we're already in the async
 	 * submission context.  Just jump into btrfs_map_bio
 	 */
-	btree_csum_one_bio(bio);
-	return 0;
+	return btree_csum_one_bio(bio);
 }
 
 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
@@ -847,15 +867,16 @@
 {
 	int ret;
 
-	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
-					  bio, 1);
-	BUG_ON(ret);
-
 	if (!(rw & REQ_WRITE)) {
+
 		/*
 		 * called for a read, do the setup so that checksum validation
 		 * can happen in the async kernel threads
 		 */
+		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
+					  bio, 1);
+		if (ret)
+			return ret;
 		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
 				     mirror_num, 0);
 	}
@@ -893,34 +914,6 @@
 }
 #endif
 
-static int btree_writepage(struct page *page, struct writeback_control *wbc)
-{
-	struct extent_io_tree *tree;
-	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-	struct extent_buffer *eb;
-	int was_dirty;
-
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	if (!(current->flags & PF_MEMALLOC)) {
-		return extent_write_full_page(tree, page,
-					      btree_get_extent, wbc);
-	}
-
-	redirty_page_for_writepage(wbc, page);
-	eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
-	WARN_ON(!eb);
-
-	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
-	if (!was_dirty) {
-		spin_lock(&root->fs_info->delalloc_lock);
-		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
-		spin_unlock(&root->fs_info->delalloc_lock);
-	}
-	free_extent_buffer(eb);
-
-	unlock_page(page);
-	return 0;
-}
 
 static int btree_writepages(struct address_space *mapping,
 			    struct writeback_control *wbc)
@@ -940,7 +933,7 @@
 		if (num_dirty < thresh)
 			return 0;
 	}
-	return extent_writepages(tree, mapping, btree_get_extent, wbc);
+	return btree_write_cache_pages(mapping, wbc);
 }
 
 static int btree_readpage(struct file *file, struct page *page)
@@ -952,16 +945,8 @@
 
 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 {
-	struct extent_io_tree *tree;
-	struct extent_map_tree *map;
-	int ret;
-
 	if (PageWriteback(page) || PageDirty(page))
 		return 0;
-
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	map = &BTRFS_I(page->mapping->host)->extent_tree;
-
 	/*
 	 * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
 	 * slab allocation from alloc_extent_state down the callchain where
@@ -969,18 +954,7 @@
 	 */
 	gfp_flags &= ~GFP_SLAB_BUG_MASK;
 
-	ret = try_release_extent_state(map, tree, page, gfp_flags);
-	if (!ret)
-		return 0;
-
-	ret = try_release_extent_buffer(tree, page);
-	if (ret == 1) {
-		ClearPagePrivate(page);
-		set_page_private(page, 0);
-		page_cache_release(page);
-	}
-
-	return ret;
+	return try_release_extent_buffer(page, gfp_flags);
 }
 
 static void btree_invalidatepage(struct page *page, unsigned long offset)
@@ -998,15 +972,28 @@
 	}
 }
 
+static int btree_set_page_dirty(struct page *page)
+{
+	struct extent_buffer *eb;
+
+	BUG_ON(!PagePrivate(page));
+	eb = (struct extent_buffer *)page->private;
+	BUG_ON(!eb);
+	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+	BUG_ON(!atomic_read(&eb->refs));
+	btrfs_assert_tree_locked(eb);
+	return __set_page_dirty_nobuffers(page);
+}
+
 static const struct address_space_operations btree_aops = {
 	.readpage	= btree_readpage,
-	.writepage	= btree_writepage,
 	.writepages	= btree_writepages,
 	.releasepage	= btree_releasepage,
 	.invalidatepage = btree_invalidatepage,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= btree_migratepage,
 #endif
+	.set_page_dirty = btree_set_page_dirty,
 };
 
 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -1049,7 +1036,7 @@
 	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
 		free_extent_buffer(buf);
 		return -EIO;
-	} else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+	} else if (extent_buffer_uptodate(buf)) {
 		*eb = buf;
 	} else {
 		free_extent_buffer(buf);
@@ -1074,20 +1061,20 @@
 	struct extent_buffer *eb;
 
 	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-				 bytenr, blocksize, NULL);
+				 bytenr, blocksize);
 	return eb;
 }
 
 
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
-	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
 					buf->start + buf->len - 1);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-	return filemap_fdatawait_range(buf->first_page->mapping,
+	return filemap_fdatawait_range(buf->pages[0]->mapping,
 				       buf->start, buf->start + buf->len - 1);
 }
 
@@ -1102,17 +1089,13 @@
 		return NULL;
 
 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
-
-	if (ret == 0)
-		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
 	return buf;
 
 }
 
-int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		     struct extent_buffer *buf)
+void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		      struct extent_buffer *buf)
 {
-	struct inode *btree_inode = root->fs_info->btree_inode;
 	if (btrfs_header_generation(buf) ==
 	    root->fs_info->running_transaction->transid) {
 		btrfs_assert_tree_locked(buf);
@@ -1121,23 +1104,27 @@
 			spin_lock(&root->fs_info->delalloc_lock);
 			if (root->fs_info->dirty_metadata_bytes >= buf->len)
 				root->fs_info->dirty_metadata_bytes -= buf->len;
-			else
-				WARN_ON(1);
+			else {
+				spin_unlock(&root->fs_info->delalloc_lock);
+				btrfs_panic(root->fs_info, -EOVERFLOW,
+					  "Can't clear %lu bytes from "
+					  "dirty_metadata_bytes (%lu)",
+					  buf->len,
+					  root->fs_info->dirty_metadata_bytes);
+			}
 			spin_unlock(&root->fs_info->delalloc_lock);
 		}
 
 		/* ugh, clear_extent_buffer_dirty needs to lock the page */
 		btrfs_set_lock_blocking(buf);
-		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
-					  buf);
+		clear_extent_buffer_dirty(buf);
 	}
-	return 0;
 }
 
-static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
-			u32 stripesize, struct btrfs_root *root,
-			struct btrfs_fs_info *fs_info,
-			u64 objectid)
+static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
+			 u32 stripesize, struct btrfs_root *root,
+			 struct btrfs_fs_info *fs_info,
+			 u64 objectid)
 {
 	root->node = NULL;
 	root->commit_root = NULL;
@@ -1189,13 +1176,12 @@
 	root->defrag_running = 0;
 	root->root_key.objectid = objectid;
 	root->anon_dev = 0;
-	return 0;
 }
 
-static int find_and_setup_root(struct btrfs_root *tree_root,
-			       struct btrfs_fs_info *fs_info,
-			       u64 objectid,
-			       struct btrfs_root *root)
+static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
+					    struct btrfs_fs_info *fs_info,
+					    u64 objectid,
+					    struct btrfs_root *root)
 {
 	int ret;
 	u32 blocksize;
@@ -1208,7 +1194,8 @@
 				   &root->root_item, &root->root_key);
 	if (ret > 0)
 		return -ENOENT;
-	BUG_ON(ret);
+	else if (ret < 0)
+		return ret;
 
 	generation = btrfs_root_generation(&root->root_item);
 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
@@ -1377,7 +1364,7 @@
 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
 				     blocksize, generation);
 	root->commit_root = btrfs_root_node(root);
-	BUG_ON(!root->node);
+	BUG_ON(!root->node); /* -ENOMEM */
 out:
 	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
 		root->ref_cows = 1;
@@ -1513,41 +1500,6 @@
 	return 0;
 }
 
-static int bio_ready_for_csum(struct bio *bio)
-{
-	u64 length = 0;
-	u64 buf_len = 0;
-	u64 start = 0;
-	struct page *page;
-	struct extent_io_tree *io_tree = NULL;
-	struct bio_vec *bvec;
-	int i;
-	int ret;
-
-	bio_for_each_segment(bvec, bio, i) {
-		page = bvec->bv_page;
-		if (page->private == EXTENT_PAGE_PRIVATE) {
-			length += bvec->bv_len;
-			continue;
-		}
-		if (!page->private) {
-			length += bvec->bv_len;
-			continue;
-		}
-		length = bvec->bv_len;
-		buf_len = page->private >> 2;
-		start = page_offset(page) + bvec->bv_offset;
-		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
-	}
-	/* are we fully contained in this bio? */
-	if (buf_len <= length)
-		return 1;
-
-	ret = extent_range_uptodate(io_tree, start + length,
-				    start + buf_len - 1);
-	return ret;
-}
-
 /*
  * called by the kthread helper functions to finally call the bio end_io
  * functions.  This is where read checksum verification actually happens
@@ -1563,17 +1515,6 @@
 	bio = end_io_wq->bio;
 	fs_info = end_io_wq->info;
 
-	/* metadata bio reads are special because the whole tree block must
-	 * be checksummed at once.  This makes sure the entire block is in
-	 * ram and up to date before trying to verify things.  For
-	 * blocksize <= pagesize, it is basically a noop
-	 */
-	if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
-	    !bio_ready_for_csum(bio)) {
-		btrfs_queue_worker(&fs_info->endio_meta_workers,
-				   &end_io_wq->work);
-		return;
-	}
 	error = end_io_wq->error;
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
@@ -1614,9 +1555,10 @@
 	u64 transid;
 	unsigned long now;
 	unsigned long delay;
-	int ret;
+	bool cannot_commit;
 
 	do {
+		cannot_commit = false;
 		delay = HZ * 30;
 		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
 		mutex_lock(&root->fs_info->transaction_kthread_mutex);
@@ -1638,11 +1580,14 @@
 		transid = cur->transid;
 		spin_unlock(&root->fs_info->trans_lock);
 
+		/* If the file system is aborted, this will always fail. */
 		trans = btrfs_join_transaction(root);
-		BUG_ON(IS_ERR(trans));
+		if (IS_ERR(trans)) {
+			cannot_commit = true;
+			goto sleep;
+		}
 		if (transid == trans->transid) {
-			ret = btrfs_commit_transaction(trans, root);
-			BUG_ON(ret);
+			btrfs_commit_transaction(trans, root);
 		} else {
 			btrfs_end_transaction(trans, root);
 		}
@@ -1653,7 +1598,8 @@
 		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
-			    !btrfs_transaction_blocked(root->fs_info))
+			    (!btrfs_transaction_blocked(root->fs_info) ||
+			     cannot_commit))
 				schedule_timeout(delay);
 			__set_current_state(TASK_RUNNING);
 		}
@@ -2042,6 +1988,7 @@
 	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
 	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
 			     fs_info->btree_inode->i_mapping);
+	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
 	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
 
 	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
@@ -2084,6 +2031,7 @@
 	__setup_root(4096, 4096, 4096, 4096, tree_root,
 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
+	invalidate_bdev(fs_devices->latest_bdev);
 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
 	if (!bh) {
 		err = -EINVAL;
@@ -2104,7 +2052,12 @@
 	/* check FS state, whether FS is broken. */
 	fs_info->fs_state |= btrfs_super_flags(disk_super);
 
-	btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+	if (ret) {
+		printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
+		err = ret;
+		goto fail_alloc;
+	}
 
 	/*
 	 * run through our array of backup supers and setup
@@ -2135,10 +2088,55 @@
 		goto fail_alloc;
 	}
 
+	if (btrfs_super_leafsize(disk_super) !=
+	    btrfs_super_nodesize(disk_super)) {
+		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
+		       "blocksizes don't match.  node %d leaf %d\n",
+		       btrfs_super_nodesize(disk_super),
+		       btrfs_super_leafsize(disk_super));
+		err = -EINVAL;
+		goto fail_alloc;
+	}
+	if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
+		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
+		       "blocksize (%d) was too large\n",
+		       btrfs_super_leafsize(disk_super));
+		err = -EINVAL;
+		goto fail_alloc;
+	}
+
 	features = btrfs_super_incompat_flags(disk_super);
 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
 	if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+
+	/*
+	 * flag our filesystem as having big metadata blocks if
+	 * they are bigger than the page size
+	 */
+	if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
+		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+			printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
+		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+	}
+
+	nodesize = btrfs_super_nodesize(disk_super);
+	leafsize = btrfs_super_leafsize(disk_super);
+	sectorsize = btrfs_super_sectorsize(disk_super);
+	stripesize = btrfs_super_stripesize(disk_super);
+
+	/*
+	 * mixed block groups end up with duplicate but slightly offset
+	 * extent buffers for the same range.  It leads to corruptions
+	 */
+	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
+	    (sectorsize != leafsize)) {
+		printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
+				"are not allowed for mixed block groups on %s\n",
+				sb->s_id);
+		goto fail_alloc;
+	}
+
 	btrfs_set_super_incompat_flags(disk_super, features);
 
 	features = btrfs_super_compat_ro_flags(disk_super) &
@@ -2242,10 +2240,6 @@
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
 				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
 
-	nodesize = btrfs_super_nodesize(disk_super);
-	leafsize = btrfs_super_leafsize(disk_super);
-	sectorsize = btrfs_super_sectorsize(disk_super);
-	stripesize = btrfs_super_stripesize(disk_super);
 	tree_root->nodesize = nodesize;
 	tree_root->leafsize = leafsize;
 	tree_root->sectorsize = sectorsize;
@@ -2285,7 +2279,7 @@
 	chunk_root->node = read_tree_block(chunk_root,
 					   btrfs_super_chunk_root(disk_super),
 					   blocksize, generation);
-	BUG_ON(!chunk_root->node);
+	BUG_ON(!chunk_root->node); /* -ENOMEM */
 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
 		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
 		       sb->s_id);
@@ -2425,21 +2419,31 @@
 		log_tree_root->node = read_tree_block(tree_root, bytenr,
 						      blocksize,
 						      generation + 1);
+		/* returns with log_tree_root freed on success */
 		ret = btrfs_recover_log_trees(log_tree_root);
-		BUG_ON(ret);
+		if (ret) {
+			btrfs_error(tree_root->fs_info, ret,
+				    "Failed to recover log tree");
+			free_extent_buffer(log_tree_root->node);
+			kfree(log_tree_root);
+			goto fail_trans_kthread;
+		}
 
 		if (sb->s_flags & MS_RDONLY) {
-			ret =  btrfs_commit_super(tree_root);
-			BUG_ON(ret);
+			ret = btrfs_commit_super(tree_root);
+			if (ret)
+				goto fail_trans_kthread;
 		}
 	}
 
 	ret = btrfs_find_orphan_roots(tree_root);
-	BUG_ON(ret);
+	if (ret)
+		goto fail_trans_kthread;
 
 	if (!(sb->s_flags & MS_RDONLY)) {
 		ret = btrfs_cleanup_fs_roots(fs_info);
-		BUG_ON(ret);
+		if (ret) {
+			}
 
 		ret = btrfs_recover_relocation(tree_root);
 		if (ret < 0) {
@@ -2859,6 +2863,8 @@
 	if (total_errors > max_errors) {
 		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
 		       total_errors);
+
+		/* This shouldn't happen. FUA is masked off if unsupported */
 		BUG();
 	}
 
@@ -2875,9 +2881,9 @@
 	}
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 	if (total_errors > max_errors) {
-		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
-		       total_errors);
-		BUG();
+		btrfs_error(root->fs_info, -EIO,
+			    "%d errors while writing supers", total_errors);
+		return -EIO;
 	}
 	return 0;
 }
@@ -2891,7 +2897,20 @@
 	return ret;
 }
 
-int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+/* Kill all outstanding I/O */
+void btrfs_abort_devices(struct btrfs_root *root)
+{
+	struct list_head *head;
+	struct btrfs_device *dev;
+	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+	head = &root->fs_info->fs_devices->devices;
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		blk_abort_queue(dev->bdev->bd_disk->queue);
+	}
+	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+}
+
+void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 {
 	spin_lock(&fs_info->fs_roots_radix_lock);
 	radix_tree_delete(&fs_info->fs_roots_radix,
@@ -2904,7 +2923,6 @@
 	__btrfs_remove_free_space_cache(root->free_ino_pinned);
 	__btrfs_remove_free_space_cache(root->free_ino_ctl);
 	free_fs_root(root);
-	return 0;
 }
 
 static void free_fs_root(struct btrfs_root *root)
@@ -2921,7 +2939,7 @@
 	kfree(root);
 }
 
-static int del_fs_roots(struct btrfs_fs_info *fs_info)
+static void del_fs_roots(struct btrfs_fs_info *fs_info)
 {
 	int ret;
 	struct btrfs_root *gang[8];
@@ -2950,7 +2968,6 @@
 		for (i = 0; i < ret; i++)
 			btrfs_free_fs_root(fs_info, gang[i]);
 	}
-	return 0;
 }
 
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
@@ -2999,14 +3016,21 @@
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
 	ret = btrfs_commit_transaction(trans, root);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 	/* run commit again to drop the original snapshot */
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
-	btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans, root);
+	if (ret)
+		return ret;
 	ret = btrfs_write_and_wait_transaction(NULL, root);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_error(root->fs_info, ret,
+			    "Failed to sync btree inode to disk.");
+		return ret;
+	}
 
 	ret = write_ctree_super(NULL, root, 0);
 	return ret;
@@ -3122,10 +3146,9 @@
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
 {
 	int ret;
-	struct inode *btree_inode = buf->first_page->mapping->host;
+	struct inode *btree_inode = buf->pages[0]->mapping->host;
 
-	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
-				     NULL);
+	ret = extent_buffer_uptodate(buf);
 	if (!ret)
 		return ret;
 
@@ -3136,16 +3159,13 @@
 
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
 {
-	struct inode *btree_inode = buf->first_page->mapping->host;
-	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
-					  buf);
+	return set_extent_buffer_uptodate(buf);
 }
 
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 {
-	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
 	u64 transid = btrfs_header_generation(buf);
-	struct inode *btree_inode = root->fs_info->btree_inode;
 	int was_dirty;
 
 	btrfs_assert_tree_locked(buf);
@@ -3157,8 +3177,7 @@
 			(unsigned long long)root->fs_info->generation);
 		WARN_ON(1);
 	}
-	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
-					    buf);
+	was_dirty = set_extent_buffer_dirty(buf);
 	if (!was_dirty) {
 		spin_lock(&root->fs_info->delalloc_lock);
 		root->fs_info->dirty_metadata_bytes += buf->len;
@@ -3212,12 +3231,8 @@
 
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 {
-	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-	int ret;
-	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
-	if (ret == 0)
-		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
-	return ret;
+	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
 }
 
 static int btree_lock_page_hook(struct page *page, void *data,
@@ -3225,17 +3240,21 @@
 {
 	struct inode *inode = page->mapping->host;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_buffer *eb;
-	unsigned long len;
-	u64 bytenr = page_offset(page);
 
-	if (page->private == EXTENT_PAGE_PRIVATE)
+	/*
+	 * We culled this eb but the page is still hanging out on the mapping,
+	 * carry on.
+	 */
+	if (!PagePrivate(page))
 		goto out;
 
-	len = page->private >> 2;
-	eb = find_extent_buffer(io_tree, bytenr, len);
-	if (!eb)
+	eb = (struct extent_buffer *)page->private;
+	if (!eb) {
+		WARN_ON(1);
+		goto out;
+	}
+	if (page != eb->pages[0])
 		goto out;
 
 	if (!btrfs_try_tree_write_lock(eb)) {
@@ -3254,7 +3273,6 @@
 	}
 
 	btrfs_tree_unlock(eb);
-	free_extent_buffer(eb);
 out:
 	if (!trylock_page(page)) {
 		flush_fn(data);
@@ -3263,15 +3281,23 @@
 	return 0;
 }
 
-static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 			      int read_only)
 {
-	if (read_only)
-		return;
+	if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
+		printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
+		return -EINVAL;
+	}
 
-	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+	if (read_only)
+		return 0;
+
+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
 		printk(KERN_WARNING "warning: mount fs with errors, "
 		       "running btrfsck is recommended\n");
+	}
+
+	return 0;
 }
 
 int btrfs_error_commit_super(struct btrfs_root *root)
@@ -3293,7 +3319,7 @@
 	return ret;
 }
 
-static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
+static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
 {
 	struct btrfs_inode *btrfs_inode;
 	struct list_head splice;
@@ -3315,11 +3341,9 @@
 
 	spin_unlock(&root->fs_info->ordered_extent_lock);
 	mutex_unlock(&root->fs_info->ordered_operations_mutex);
-
-	return 0;
 }
 
-static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
+static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
 {
 	struct list_head splice;
 	struct btrfs_ordered_extent *ordered;
@@ -3351,12 +3375,10 @@
 	}
 
 	spin_unlock(&root->fs_info->ordered_extent_lock);
-
-	return 0;
 }
 
-static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
-				      struct btrfs_root *root)
+int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+			       struct btrfs_root *root)
 {
 	struct rb_node *node;
 	struct btrfs_delayed_ref_root *delayed_refs;
@@ -3365,6 +3387,7 @@
 
 	delayed_refs = &trans->delayed_refs;
 
+again:
 	spin_lock(&delayed_refs->lock);
 	if (delayed_refs->num_entries == 0) {
 		spin_unlock(&delayed_refs->lock);
@@ -3386,6 +3409,7 @@
 			struct btrfs_delayed_ref_head *head;
 
 			head = btrfs_delayed_node_to_head(ref);
+			spin_unlock(&delayed_refs->lock);
 			mutex_lock(&head->mutex);
 			kfree(head->extent_op);
 			delayed_refs->num_heads--;
@@ -3393,8 +3417,9 @@
 				delayed_refs->num_heads_ready--;
 			list_del_init(&head->cluster);
 			mutex_unlock(&head->mutex);
+			btrfs_put_delayed_ref(ref);
+			goto again;
 		}
-
 		spin_unlock(&delayed_refs->lock);
 		btrfs_put_delayed_ref(ref);
 
@@ -3407,7 +3432,7 @@
 	return ret;
 }
 
-static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
+static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
 {
 	struct btrfs_pending_snapshot *snapshot;
 	struct list_head splice;
@@ -3425,11 +3450,9 @@
 
 		kfree(snapshot);
 	}
-
-	return 0;
 }
 
-static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
 {
 	struct btrfs_inode *btrfs_inode;
 	struct list_head splice;
@@ -3449,8 +3472,6 @@
 	}
 
 	spin_unlock(&root->fs_info->delalloc_lock);
-
-	return 0;
 }
 
 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
@@ -3541,13 +3562,43 @@
 	return 0;
 }
 
-static int btrfs_cleanup_transaction(struct btrfs_root *root)
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+				   struct btrfs_root *root)
+{
+	btrfs_destroy_delayed_refs(cur_trans, root);
+	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+				cur_trans->dirty_pages.dirty_bytes);
+
+	/* FIXME: cleanup wait for commit */
+	cur_trans->in_commit = 1;
+	cur_trans->blocked = 1;
+	if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
+		wake_up(&root->fs_info->transaction_blocked_wait);
+
+	cur_trans->blocked = 0;
+	if (waitqueue_active(&root->fs_info->transaction_wait))
+		wake_up(&root->fs_info->transaction_wait);
+
+	cur_trans->commit_done = 1;
+	if (waitqueue_active(&cur_trans->commit_wait))
+		wake_up(&cur_trans->commit_wait);
+
+	btrfs_destroy_pending_snapshots(cur_trans);
+
+	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+				     EXTENT_DIRTY);
+
+	/*
+	memset(cur_trans, 0, sizeof(*cur_trans));
+	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+	*/
+}
+
+int btrfs_cleanup_transaction(struct btrfs_root *root)
 {
 	struct btrfs_transaction *t;
 	LIST_HEAD(list);
 
-	WARN_ON(1);
-
 	mutex_lock(&root->fs_info->transaction_kthread_mutex);
 
 	spin_lock(&root->fs_info->trans_lock);
@@ -3612,6 +3663,17 @@
 	return 0;
 }
 
+static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
+					  u64 start, u64 end,
+					  struct extent_state *state)
+{
+	struct super_block *sb = page->mapping->host->i_sb;
+	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+	btrfs_error(fs_info, -EIO,
+		    "Error occurred while writing out btree at %llu", start);
+	return -EIO;
+}
+
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3619,4 +3681,5 @@
 	.submit_bio_hook = btree_submit_bio_hook,
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
+	.writepage_io_failed_hook = btree_writepage_io_failed_hook,
 };
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e4bc474..a7ace1a 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -44,8 +44,8 @@
 			 int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 						   u64 bytenr, u32 blocksize);
-int clean_tree_block(struct btrfs_trans_handle *trans,
-		     struct btrfs_root *root, struct extent_buffer *buf);
+void clean_tree_block(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root, struct extent_buffer *buf);
 int open_ctree(struct super_block *sb,
 	       struct btrfs_fs_devices *fs_devices,
 	       char *options);
@@ -64,7 +64,7 @@
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
-int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
+void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
@@ -85,6 +85,10 @@
 			     struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root);
+int btrfs_cleanup_transaction(struct btrfs_root *root);
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
+				  struct btrfs_root *root);
+void btrfs_abort_devices(struct btrfs_root *root);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 5f77166..e887ee6 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -193,7 +193,7 @@
 	if (ret < 0)
 		goto fail;
 
-	BUG_ON(ret == 0);
+	BUG_ON(ret == 0); /* Key with offset of -1 found */
 	if (path->slots[0] == 0) {
 		ret = -ENOENT;
 		goto fail;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 37e0a80..a844204 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -245,7 +245,7 @@
 		cache->bytes_super += stripe_len;
 		ret = add_excluded_extent(root, cache->key.objectid,
 					  stripe_len);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -253,13 +253,13 @@
 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
 				       cache->key.objectid, bytenr,
 				       0, &logical, &nr, &stripe_len);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 
 		while (nr--) {
 			cache->bytes_super += stripe_len;
 			ret = add_excluded_extent(root, logical[nr],
 						  stripe_len);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		}
 
 		kfree(logical);
@@ -321,7 +321,7 @@
 			total_added += size;
 			ret = btrfs_add_free_space(block_group, start,
 						   size);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM or logic error */
 			start = extent_end + 1;
 		} else {
 			break;
@@ -332,7 +332,7 @@
 		size = end - start;
 		total_added += size;
 		ret = btrfs_add_free_space(block_group, start, size);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM or logic error */
 	}
 
 	return total_added;
@@ -474,7 +474,8 @@
 	int ret = 0;
 
 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-	BUG_ON(!caching_ctl);
+	if (!caching_ctl)
+		return -ENOMEM;
 
 	INIT_LIST_HEAD(&caching_ctl->list);
 	mutex_init(&caching_ctl->mutex);
@@ -982,7 +983,7 @@
 				ret = btrfs_next_leaf(root, path);
 				if (ret < 0)
 					return ret;
-				BUG_ON(ret > 0);
+				BUG_ON(ret > 0); /* Corruption */
 				leaf = path->nodes[0];
 			}
 			btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1008,9 +1009,9 @@
 				new_size + extra_size, 1);
 	if (ret < 0)
 		return ret;
-	BUG_ON(ret);
+	BUG_ON(ret); /* Corruption */
 
-	ret = btrfs_extend_item(trans, root, path, new_size);
+	btrfs_extend_item(trans, root, path, new_size);
 
 	leaf = path->nodes[0];
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1478,7 +1479,11 @@
 		err = ret;
 		goto out;
 	}
-	BUG_ON(ret);
+	if (ret && !insert) {
+		err = -ENOENT;
+		goto out;
+	}
+	BUG_ON(ret); /* Corruption */
 
 	leaf = path->nodes[0];
 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -1592,13 +1597,13 @@
  * helper to add new inline back ref
  */
 static noinline_for_stack
-int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
-				struct btrfs_path *path,
-				struct btrfs_extent_inline_ref *iref,
-				u64 parent, u64 root_objectid,
-				u64 owner, u64 offset, int refs_to_add,
-				struct btrfs_delayed_extent_op *extent_op)
+void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct btrfs_path *path,
+				 struct btrfs_extent_inline_ref *iref,
+				 u64 parent, u64 root_objectid,
+				 u64 owner, u64 offset, int refs_to_add,
+				 struct btrfs_delayed_extent_op *extent_op)
 {
 	struct extent_buffer *leaf;
 	struct btrfs_extent_item *ei;
@@ -1608,7 +1613,6 @@
 	u64 refs;
 	int size;
 	int type;
-	int ret;
 
 	leaf = path->nodes[0];
 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1617,7 +1621,7 @@
 	type = extent_ref_type(parent, owner);
 	size = btrfs_extent_inline_ref_size(type);
 
-	ret = btrfs_extend_item(trans, root, path, size);
+	btrfs_extend_item(trans, root, path, size);
 
 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
 	refs = btrfs_extent_refs(leaf, ei);
@@ -1652,7 +1656,6 @@
 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
 	}
 	btrfs_mark_buffer_dirty(leaf);
-	return 0;
 }
 
 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1687,12 +1690,12 @@
  * helper to update/remove inline back ref
  */
 static noinline_for_stack
-int update_inline_extent_backref(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root,
-				 struct btrfs_path *path,
-				 struct btrfs_extent_inline_ref *iref,
-				 int refs_to_mod,
-				 struct btrfs_delayed_extent_op *extent_op)
+void update_inline_extent_backref(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root,
+				  struct btrfs_path *path,
+				  struct btrfs_extent_inline_ref *iref,
+				  int refs_to_mod,
+				  struct btrfs_delayed_extent_op *extent_op)
 {
 	struct extent_buffer *leaf;
 	struct btrfs_extent_item *ei;
@@ -1703,7 +1706,6 @@
 	u32 item_size;
 	int size;
 	int type;
-	int ret;
 	u64 refs;
 
 	leaf = path->nodes[0];
@@ -1745,10 +1747,9 @@
 			memmove_extent_buffer(leaf, ptr, ptr + size,
 					      end - ptr - size);
 		item_size -= size;
-		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
+		btrfs_truncate_item(trans, root, path, item_size, 1);
 	}
 	btrfs_mark_buffer_dirty(leaf);
-	return 0;
 }
 
 static noinline_for_stack
@@ -1768,13 +1769,13 @@
 					   root_objectid, owner, offset, 1);
 	if (ret == 0) {
 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
-		ret = update_inline_extent_backref(trans, root, path, iref,
-						   refs_to_add, extent_op);
+		update_inline_extent_backref(trans, root, path, iref,
+					     refs_to_add, extent_op);
 	} else if (ret == -ENOENT) {
-		ret = setup_inline_extent_backref(trans, root, path, iref,
-						  parent, root_objectid,
-						  owner, offset, refs_to_add,
-						  extent_op);
+		setup_inline_extent_backref(trans, root, path, iref, parent,
+					    root_objectid, owner, offset,
+					    refs_to_add, extent_op);
+		ret = 0;
 	}
 	return ret;
 }
@@ -1804,12 +1805,12 @@
 				 struct btrfs_extent_inline_ref *iref,
 				 int refs_to_drop, int is_data)
 {
-	int ret;
+	int ret = 0;
 
 	BUG_ON(!is_data && refs_to_drop != 1);
 	if (iref) {
-		ret = update_inline_extent_backref(trans, root, path, iref,
-						   -refs_to_drop, NULL);
+		update_inline_extent_backref(trans, root, path, iref,
+					     -refs_to_drop, NULL);
 	} else if (is_data) {
 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
 	} else {
@@ -1835,6 +1836,7 @@
 	/* Tell the block device(s) that the sectors can be discarded */
 	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
 			      bytenr, &num_bytes, &bbio, 0);
+	/* Error condition is -ENOMEM */
 	if (!ret) {
 		struct btrfs_bio_stripe *stripe = bbio->stripes;
 		int i;
@@ -1850,7 +1852,7 @@
 			if (!ret)
 				discarded_bytes += stripe->length;
 			else if (ret != -EOPNOTSUPP)
-				break;
+				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
 
 			/*
 			 * Just in case we get back EOPNOTSUPP for some reason,
@@ -1869,6 +1871,7 @@
 	return ret;
 }
 
+/* Can return -ENOMEM */
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root,
 			 u64 bytenr, u64 num_bytes, u64 parent,
@@ -1944,7 +1947,8 @@
 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
 				    path, bytenr, parent, root_objectid,
 				    owner, offset, refs_to_add);
-	BUG_ON(ret);
+	if (ret)
+		btrfs_abort_transaction(trans, root, ret);
 out:
 	btrfs_free_path(path);
 	return err;
@@ -2031,6 +2035,9 @@
 	int ret;
 	int err = 0;
 
+	if (trans->aborted)
+		return 0;
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -2128,7 +2135,11 @@
 			       struct btrfs_delayed_extent_op *extent_op,
 			       int insert_reserved)
 {
-	int ret;
+	int ret = 0;
+
+	if (trans->aborted)
+		return 0;
+
 	if (btrfs_delayed_ref_is_head(node)) {
 		struct btrfs_delayed_ref_head *head;
 		/*
@@ -2146,11 +2157,10 @@
 				ret = btrfs_del_csums(trans, root,
 						      node->bytenr,
 						      node->num_bytes);
-				BUG_ON(ret);
 			}
 		}
 		mutex_unlock(&head->mutex);
-		return 0;
+		return ret;
 	}
 
 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
@@ -2197,6 +2207,10 @@
 	return NULL;
 }
 
+/*
+ * Returns 0 on success or if called with an already aborted transaction.
+ * Returns -ENOMEM or -EIO on failure and will abort the transaction.
+ */
 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root,
 				       struct list_head *cluster)
@@ -2285,9 +2299,13 @@
 
 				ret = run_delayed_extent_op(trans, root,
 							    ref, extent_op);
-				BUG_ON(ret);
 				kfree(extent_op);
 
+				if (ret) {
+					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+					return ret;
+				}
+
 				goto next;
 			}
 
@@ -2308,11 +2326,16 @@
 
 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
 					  must_insert_reserved);
-		BUG_ON(ret);
 
 		btrfs_put_delayed_ref(ref);
 		kfree(extent_op);
 		count++;
+
+		if (ret) {
+			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+			return ret;
+		}
+
 next:
 		do_chunk_alloc(trans, root->fs_info->extent_root,
 			       2 * 1024 * 1024,
@@ -2347,6 +2370,9 @@
  * 0, which means to process everything in the tree at the start
  * of the run (but not newly added entries), or it can be some target
  * number you'd like to process.
+ *
+ * Returns 0 on success or if called with an aborted transaction
+ * Returns <0 on error and aborts the transaction
  */
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count)
@@ -2362,6 +2388,10 @@
 	unsigned long num_refs = 0;
 	int consider_waiting;
 
+	/* We'll clean this up in btrfs_cleanup_transaction */
+	if (trans->aborted)
+		return 0;
+
 	if (root == root->fs_info->extent_root)
 		root = root->fs_info->tree_root;
 
@@ -2419,7 +2449,11 @@
 		}
 
 		ret = run_clustered_refs(trans, root, &cluster);
-		BUG_ON(ret < 0);
+		if (ret < 0) {
+			spin_unlock(&delayed_refs->lock);
+			btrfs_abort_transaction(trans, root, ret);
+			return ret;
+		}
 
 		count -= min_t(unsigned long, ret, count);
 
@@ -2584,7 +2618,7 @@
 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
-	BUG_ON(ret == 0);
+	BUG_ON(ret == 0); /* Corruption */
 
 	ret = -ENOENT;
 	if (path->slots[0] == 0)
@@ -2738,7 +2772,6 @@
 	}
 	return 0;
 fail:
-	BUG();
 	return ret;
 }
 
@@ -2767,7 +2800,7 @@
 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
 	if (ret < 0)
 		goto fail;
-	BUG_ON(ret);
+	BUG_ON(ret); /* Corruption */
 
 	leaf = path->nodes[0];
 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -2775,8 +2808,10 @@
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
 fail:
-	if (ret)
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
 		return ret;
+	}
 	return 0;
 
 }
@@ -2949,7 +2984,8 @@
 		if (last == 0) {
 			err = btrfs_run_delayed_refs(trans, root,
 						     (unsigned long)-1);
-			BUG_ON(err);
+			if (err) /* File system offline */
+				goto out;
 		}
 
 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -2976,7 +3012,9 @@
 		last = cache->key.objectid + cache->key.offset;
 
 		err = write_one_cache_group(trans, root, path, cache);
-		BUG_ON(err);
+		if (err) /* File system offline */
+			goto out;
+
 		btrfs_put_block_group(cache);
 	}
 
@@ -2989,7 +3027,8 @@
 		if (last == 0) {
 			err = btrfs_run_delayed_refs(trans, root,
 						     (unsigned long)-1);
-			BUG_ON(err);
+			if (err) /* File system offline */
+				goto out;
 		}
 
 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -3014,20 +3053,21 @@
 			continue;
 		}
 
-		btrfs_write_out_cache(root, trans, cache, path);
+		err = btrfs_write_out_cache(root, trans, cache, path);
 
 		/*
 		 * If we didn't have an error then the cache state is still
 		 * NEED_WRITE, so we can set it to WRITTEN.
 		 */
-		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
+		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
 			cache->disk_cache_state = BTRFS_DC_WRITTEN;
 		last = cache->key.objectid + cache->key.offset;
 		btrfs_put_block_group(cache);
 	}
+out:
 
 	btrfs_free_path(path);
-	return 0;
+	return err;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -3098,11 +3138,8 @@
 
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-	u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	/* chunk -> extended profile */
-	if (extra_flags == 0)
-		extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	u64 extra_flags = chunk_to_extended(flags) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits |= extra_flags;
@@ -3113,6 +3150,35 @@
 }
 
 /*
+ * returns target flags in extended format or 0 if restripe for this
+ * chunk_type is not in progress
+ */
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+{
+	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+	u64 target = 0;
+
+	BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
+	       !spin_is_locked(&fs_info->balance_lock));
+
+	if (!bctl)
+		return 0;
+
+	if (flags & BTRFS_BLOCK_GROUP_DATA &&
+	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+	}
+
+	return target;
+}
+
+/*
  * @flags: available profiles in extended format (see ctree.h)
  *
  * Returns reduced profile in chunk format.  If profile changing is in
@@ -3128,31 +3194,19 @@
 	 */
 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
+	u64 target;
 
-	/* pick restriper's target profile if it's available */
+	/*
+	 * see if restripe for this chunk_type is in progress, if so
+	 * try to reduce to the target profile
+	 */
 	spin_lock(&root->fs_info->balance_lock);
-	if (root->fs_info->balance_ctl) {
-		struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-		u64 tgt = 0;
-
-		if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
-		    (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-		    (flags & bctl->data.target)) {
-			tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-		} else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
-			   (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-			   (flags & bctl->sys.target)) {
-			tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-		} else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
-			   (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-			   (flags & bctl->meta.target)) {
-			tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-		}
-
-		if (tgt) {
+	target = get_restripe_target(root->fs_info, flags);
+	if (target) {
+		/* pick target profile only if it's already available */
+		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
 			spin_unlock(&root->fs_info->balance_lock);
-			flags = tgt;
-			goto out;
+			return extended_to_chunk(target);
 		}
 	}
 	spin_unlock(&root->fs_info->balance_lock);
@@ -3180,10 +3234,7 @@
 		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
 	}
 
-out:
-	/* extended -> chunk profile */
-	flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-	return flags;
+	return extended_to_chunk(flags);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
@@ -3312,8 +3363,7 @@
 	}
 	data_sinfo->bytes_may_use += bytes;
 	trace_btrfs_space_reservation(root->fs_info, "space_info",
-				      (u64)(unsigned long)data_sinfo,
-				      bytes, 1);
+				      data_sinfo->flags, bytes, 1);
 	spin_unlock(&data_sinfo->lock);
 
 	return 0;
@@ -3334,8 +3384,7 @@
 	spin_lock(&data_sinfo->lock);
 	data_sinfo->bytes_may_use -= bytes;
 	trace_btrfs_space_reservation(root->fs_info, "space_info",
-				      (u64)(unsigned long)data_sinfo,
-				      bytes, 0);
+				      data_sinfo->flags, bytes, 0);
 	spin_unlock(&data_sinfo->lock);
 }
 
@@ -3396,6 +3445,50 @@
 	return 1;
 }
 
+static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
+{
+	u64 num_dev;
+
+	if (type & BTRFS_BLOCK_GROUP_RAID10 ||
+	    type & BTRFS_BLOCK_GROUP_RAID0)
+		num_dev = root->fs_info->fs_devices->rw_devices;
+	else if (type & BTRFS_BLOCK_GROUP_RAID1)
+		num_dev = 2;
+	else
+		num_dev = 1;	/* DUP or single */
+
+	/* metadata for updating devices and chunk tree */
+	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
+}
+
+static void check_system_chunk(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root, u64 type)
+{
+	struct btrfs_space_info *info;
+	u64 left;
+	u64 thresh;
+
+	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+	spin_lock(&info->lock);
+	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
+		info->bytes_reserved - info->bytes_readonly;
+	spin_unlock(&info->lock);
+
+	thresh = get_system_chunk_thresh(root, type);
+	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
+		printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
+		       left, thresh, type);
+		dump_space_info(info, 0, 0);
+	}
+
+	if (left < thresh) {
+		u64 flags;
+
+		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
+		btrfs_alloc_chunk(trans, root, flags);
+	}
+}
+
 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *extent_root, u64 alloc_bytes,
 			  u64 flags, int force)
@@ -3405,15 +3498,13 @@
 	int wait_for_alloc = 0;
 	int ret = 0;
 
-	BUG_ON(!profile_is_valid(flags, 0));
-
 	space_info = __find_space_info(extent_root->fs_info, flags);
 	if (!space_info) {
 		ret = update_space_info(extent_root->fs_info, flags,
 					0, 0, &space_info);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
-	BUG_ON(!space_info);
+	BUG_ON(!space_info); /* Logic error */
 
 again:
 	spin_lock(&space_info->lock);
@@ -3468,6 +3559,12 @@
 			force_metadata_allocation(fs_info);
 	}
 
+	/*
+	 * Check if we have enough space in SYSTEM chunk because we may need
+	 * to update devices.
+	 */
+	check_system_chunk(trans, extent_root, flags);
+
 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
 	if (ret < 0 && ret != -ENOSPC)
 		goto out;
@@ -3678,8 +3775,10 @@
 		ret = wait_event_interruptible(space_info->wait,
 					       !space_info->flush);
 		/* Must have been interrupted, return */
-		if (ret)
+		if (ret) {
+			printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
 			return -EINTR;
+		}
 
 		spin_lock(&space_info->lock);
 	}
@@ -3700,9 +3799,7 @@
 		if (used + orig_bytes <= space_info->total_bytes) {
 			space_info->bytes_may_use += orig_bytes;
 			trace_btrfs_space_reservation(root->fs_info,
-					      "space_info",
-					      (u64)(unsigned long)space_info,
-					      orig_bytes, 1);
+				"space_info", space_info->flags, orig_bytes, 1);
 			ret = 0;
 		} else {
 			/*
@@ -3771,9 +3868,7 @@
 		if (used + num_bytes < space_info->total_bytes + avail) {
 			space_info->bytes_may_use += orig_bytes;
 			trace_btrfs_space_reservation(root->fs_info,
-					      "space_info",
-					      (u64)(unsigned long)space_info,
-					      orig_bytes, 1);
+				"space_info", space_info->flags, orig_bytes, 1);
 			ret = 0;
 		} else {
 			wait_ordered = true;
@@ -3836,8 +3931,9 @@
 	return ret;
 }
 
-static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
-					     struct btrfs_root *root)
+static struct btrfs_block_rsv *get_block_rsv(
+					const struct btrfs_trans_handle *trans,
+					const struct btrfs_root *root)
 {
 	struct btrfs_block_rsv *block_rsv = NULL;
 
@@ -3918,8 +4014,7 @@
 			spin_lock(&space_info->lock);
 			space_info->bytes_may_use -= num_bytes;
 			trace_btrfs_space_reservation(fs_info, "space_info",
-					      (u64)(unsigned long)space_info,
-					      num_bytes, 0);
+					space_info->flags, num_bytes, 0);
 			space_info->reservation_progress++;
 			spin_unlock(&space_info->lock);
 		}
@@ -4137,14 +4232,14 @@
 		block_rsv->reserved += num_bytes;
 		sinfo->bytes_may_use += num_bytes;
 		trace_btrfs_space_reservation(fs_info, "space_info",
-				      (u64)(unsigned long)sinfo, num_bytes, 1);
+				      sinfo->flags, num_bytes, 1);
 	}
 
 	if (block_rsv->reserved >= block_rsv->size) {
 		num_bytes = block_rsv->reserved - block_rsv->size;
 		sinfo->bytes_may_use -= num_bytes;
 		trace_btrfs_space_reservation(fs_info, "space_info",
-				      (u64)(unsigned long)sinfo, num_bytes, 0);
+				      sinfo->flags, num_bytes, 0);
 		sinfo->reservation_progress++;
 		block_rsv->reserved = block_rsv->size;
 		block_rsv->full = 1;
@@ -4198,12 +4293,12 @@
 		return;
 
 	trace_btrfs_space_reservation(root->fs_info, "transaction",
-				      (u64)(unsigned long)trans,
-				      trans->bytes_reserved, 0);
+				      trans->transid, trans->bytes_reserved, 0);
 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 	trans->bytes_reserved = 0;
 }
 
+/* Can only return 0 or -ENOSPC */
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 				  struct inode *inode)
 {
@@ -4540,7 +4635,7 @@
 	while (total) {
 		cache = btrfs_lookup_block_group(info, bytenr);
 		if (!cache)
-			return -1;
+			return -ENOENT;
 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
 				    BTRFS_BLOCK_GROUP_RAID1 |
 				    BTRFS_BLOCK_GROUP_RAID10))
@@ -4643,7 +4738,7 @@
 	struct btrfs_block_group_cache *cache;
 
 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
-	BUG_ON(!cache);
+	BUG_ON(!cache); /* Logic error */
 
 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
 
@@ -4661,7 +4756,7 @@
 	struct btrfs_block_group_cache *cache;
 
 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
-	BUG_ON(!cache);
+	BUG_ON(!cache); /* Logic error */
 
 	/*
 	 * pull in the free space cache (if any) so that our pin
@@ -4706,6 +4801,7 @@
 {
 	struct btrfs_space_info *space_info = cache->space_info;
 	int ret = 0;
+
 	spin_lock(&space_info->lock);
 	spin_lock(&cache->lock);
 	if (reserve != RESERVE_FREE) {
@@ -4716,9 +4812,8 @@
 			space_info->bytes_reserved += num_bytes;
 			if (reserve == RESERVE_ALLOC) {
 				trace_btrfs_space_reservation(cache->fs_info,
-					      "space_info",
-					      (u64)(unsigned long)space_info,
-					      num_bytes, 0);
+						"space_info", space_info->flags,
+						num_bytes, 0);
 				space_info->bytes_may_use -= num_bytes;
 			}
 		}
@@ -4734,7 +4829,7 @@
 	return ret;
 }
 
-int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4764,7 +4859,6 @@
 	up_write(&fs_info->extent_commit_sem);
 
 	update_global_block_rsv(fs_info);
-	return 0;
 }
 
 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
@@ -4779,7 +4873,7 @@
 			if (cache)
 				btrfs_put_block_group(cache);
 			cache = btrfs_lookup_block_group(fs_info, start);
-			BUG_ON(!cache);
+			BUG_ON(!cache); /* Logic error */
 		}
 
 		len = cache->key.objectid + cache->key.offset - start;
@@ -4816,6 +4910,9 @@
 	u64 end;
 	int ret;
 
+	if (trans->aborted)
+		return 0;
+
 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
 		unpin = &fs_info->freed_extents[1];
 	else
@@ -4901,7 +4998,8 @@
 			ret = remove_extent_backref(trans, extent_root, path,
 						    NULL, refs_to_drop,
 						    is_data);
-			BUG_ON(ret);
+			if (ret)
+				goto abort;
 			btrfs_release_path(path);
 			path->leave_spinning = 1;
 
@@ -4919,10 +5017,11 @@
 					btrfs_print_leaf(extent_root,
 							 path->nodes[0]);
 			}
-			BUG_ON(ret);
+			if (ret < 0)
+				goto abort;
 			extent_slot = path->slots[0];
 		}
-	} else {
+	} else if (ret == -ENOENT) {
 		btrfs_print_leaf(extent_root, path->nodes[0]);
 		WARN_ON(1);
 		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
@@ -4932,6 +5031,8 @@
 		       (unsigned long long)root_objectid,
 		       (unsigned long long)owner_objectid,
 		       (unsigned long long)owner_offset);
+	} else {
+		goto abort;
 	}
 
 	leaf = path->nodes[0];
@@ -4941,7 +5042,8 @@
 		BUG_ON(found_extent || extent_slot != path->slots[0]);
 		ret = convert_extent_item_v0(trans, extent_root, path,
 					     owner_objectid, 0);
-		BUG_ON(ret < 0);
+		if (ret < 0)
+			goto abort;
 
 		btrfs_release_path(path);
 		path->leave_spinning = 1;
@@ -4958,7 +5060,8 @@
 			       (unsigned long long)bytenr);
 			btrfs_print_leaf(extent_root, path->nodes[0]);
 		}
-		BUG_ON(ret);
+		if (ret < 0)
+			goto abort;
 		extent_slot = path->slots[0];
 		leaf = path->nodes[0];
 		item_size = btrfs_item_size_nr(leaf, extent_slot);
@@ -4995,7 +5098,8 @@
 			ret = remove_extent_backref(trans, extent_root, path,
 						    iref, refs_to_drop,
 						    is_data);
-			BUG_ON(ret);
+			if (ret)
+				goto abort;
 		}
 	} else {
 		if (found_extent) {
@@ -5012,23 +5116,27 @@
 
 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
 				      num_to_del);
-		BUG_ON(ret);
+		if (ret)
+			goto abort;
 		btrfs_release_path(path);
 
 		if (is_data) {
 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
-			BUG_ON(ret);
-		} else {
-			invalidate_mapping_pages(info->btree_inode->i_mapping,
-			     bytenr >> PAGE_CACHE_SHIFT,
-			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
+			if (ret)
+				goto abort;
 		}
 
 		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
-		BUG_ON(ret);
+		if (ret)
+			goto abort;
 	}
+out:
 	btrfs_free_path(path);
 	return ret;
+
+abort:
+	btrfs_abort_transaction(trans, extent_root, ret);
+	goto out;
 }
 
 /*
@@ -5124,7 +5232,7 @@
 					parent, root->root_key.objectid,
 					btrfs_header_level(buf),
 					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	if (!last_ref)
@@ -5158,6 +5266,7 @@
 	btrfs_put_block_group(cache);
 }
 
+/* Can return -ENOMEM */
 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
 		      u64 owner, u64 offset, int for_cow)
@@ -5179,14 +5288,12 @@
 					num_bytes,
 					parent, root_objectid, (int)owner,
 					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
-		BUG_ON(ret);
 	} else {
 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
 						num_bytes,
 						parent, root_objectid, owner,
 						offset, BTRFS_DROP_DELAYED_REF,
 						NULL, for_cow);
-		BUG_ON(ret);
 	}
 	return ret;
 }
@@ -5243,28 +5350,34 @@
 	return 0;
 }
 
-static int get_block_group_index(struct btrfs_block_group_cache *cache)
+static int __get_block_group_index(u64 flags)
 {
 	int index;
-	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
+
+	if (flags & BTRFS_BLOCK_GROUP_RAID10)
 		index = 0;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
+	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
 		index = 1;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
+	else if (flags & BTRFS_BLOCK_GROUP_DUP)
 		index = 2;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
+	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
 		index = 3;
 	else
 		index = 4;
+
 	return index;
 }
 
+static int get_block_group_index(struct btrfs_block_group_cache *cache)
+{
+	return __get_block_group_index(cache->flags);
+}
+
 enum btrfs_loop_type {
-	LOOP_FIND_IDEAL = 0,
-	LOOP_CACHING_NOWAIT = 1,
-	LOOP_CACHING_WAIT = 2,
-	LOOP_ALLOC_CHUNK = 3,
-	LOOP_NO_EMPTY_SIZE = 4,
+	LOOP_CACHING_NOWAIT = 0,
+	LOOP_CACHING_WAIT = 1,
+	LOOP_ALLOC_CHUNK = 2,
+	LOOP_NO_EMPTY_SIZE = 3,
 };
 
 /*
@@ -5278,7 +5391,6 @@
 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 				     struct btrfs_root *orig_root,
 				     u64 num_bytes, u64 empty_size,
-				     u64 search_start, u64 search_end,
 				     u64 hint_byte, struct btrfs_key *ins,
 				     u64 data)
 {
@@ -5287,6 +5399,7 @@
 	struct btrfs_free_cluster *last_ptr = NULL;
 	struct btrfs_block_group_cache *block_group = NULL;
 	struct btrfs_block_group_cache *used_block_group;
+	u64 search_start = 0;
 	int empty_cluster = 2 * 1024 * 1024;
 	int allowed_chunk_alloc = 0;
 	int done_chunk_alloc = 0;
@@ -5300,8 +5413,6 @@
 	bool failed_alloc = false;
 	bool use_cluster = true;
 	bool have_caching_bg = false;
-	u64 ideal_cache_percent = 0;
-	u64 ideal_cache_offset = 0;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -5351,7 +5462,6 @@
 		empty_cluster = 0;
 
 	if (search_start == hint_byte) {
-ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
 		used_block_group = block_group;
@@ -5363,8 +5473,7 @@
 		 * picked out then we don't care that the block group is cached.
 		 */
 		if (block_group && block_group_bits(block_group, data) &&
-		    (block_group->cached != BTRFS_CACHE_NO ||
-		     search_start == ideal_cache_offset)) {
+		    block_group->cached != BTRFS_CACHE_NO) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
 			    block_group->ro) {
@@ -5418,44 +5527,13 @@
 have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
-			u64 free_percent;
-
 			found_uncached_bg = true;
 			ret = cache_block_group(block_group, trans,
-						orig_root, 1);
-			if (block_group->cached == BTRFS_CACHE_FINISHED)
-				goto alloc;
-
-			free_percent = btrfs_block_group_used(&block_group->item);
-			free_percent *= 100;
-			free_percent = div64_u64(free_percent,
-						 block_group->key.offset);
-			free_percent = 100 - free_percent;
-			if (free_percent > ideal_cache_percent &&
-			    likely(!block_group->ro)) {
-				ideal_cache_offset = block_group->key.objectid;
-				ideal_cache_percent = free_percent;
-			}
-
-			/*
-			 * The caching workers are limited to 2 threads, so we
-			 * can queue as much work as we care to.
-			 */
-			if (loop > LOOP_FIND_IDEAL) {
-				ret = cache_block_group(block_group, trans,
-							orig_root, 0);
-				BUG_ON(ret);
-			}
-
-			/*
-			 * If loop is set for cached only, try the next block
-			 * group.
-			 */
-			if (loop == LOOP_FIND_IDEAL)
-				goto loop;
+						orig_root, 0);
+			BUG_ON(ret < 0);
+			ret = 0;
 		}
 
-alloc:
 		if (unlikely(block_group->ro))
 			goto loop;
 
@@ -5606,11 +5684,6 @@
 		}
 checks:
 		search_start = stripe_align(root, offset);
-		/* move on to the next group */
-		if (search_start + num_bytes >= search_end) {
-			btrfs_add_free_space(used_block_group, offset, num_bytes);
-			goto loop;
-		}
 
 		/* move on to the next group */
 		if (search_start + num_bytes >
@@ -5661,9 +5734,7 @@
 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
 		goto search;
 
-	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
-	 *			for them to make caching progress.  Also
-	 *			determine the best possible bg to cache
+	/*
 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
 	 *			caching kthreads as we move along
 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
@@ -5673,50 +5744,17 @@
 	 */
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
-		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
-			found_uncached_bg = false;
-			loop++;
-			if (!ideal_cache_percent)
-				goto search;
-
-			/*
-			 * 1 of the following 2 things have happened so far
-			 *
-			 * 1) We found an ideal block group for caching that
-			 * is mostly full and will cache quickly, so we might
-			 * as well wait for it.
-			 *
-			 * 2) We searched for cached only and we didn't find
-			 * anything, and we didn't start any caching kthreads
-			 * either, so chances are we will loop through and
-			 * start a couple caching kthreads, and then come back
-			 * around and just wait for them.  This will be slower
-			 * because we will have 2 caching kthreads reading at
-			 * the same time when we could have just started one
-			 * and waited for it to get far enough to give us an
-			 * allocation, so go ahead and go to the wait caching
-			 * loop.
-			 */
-			loop = LOOP_CACHING_WAIT;
-			search_start = ideal_cache_offset;
-			ideal_cache_percent = 0;
-			goto ideal_cache;
-		} else if (loop == LOOP_FIND_IDEAL) {
-			/*
-			 * Didn't find a uncached bg, wait on anything we find
-			 * next.
-			 */
-			loop = LOOP_CACHING_WAIT;
-			goto search;
-		}
-
 		loop++;
-
 		if (loop == LOOP_ALLOC_CHUNK) {
 		       if (allowed_chunk_alloc) {
 				ret = do_chunk_alloc(trans, root, num_bytes +
 						     2 * 1024 * 1024, data,
 						     CHUNK_ALLOC_LIMITED);
+				if (ret < 0) {
+					btrfs_abort_transaction(trans,
+								root, ret);
+					goto out;
+				}
 				allowed_chunk_alloc = 0;
 				if (ret == 1)
 					done_chunk_alloc = 1;
@@ -5745,6 +5783,7 @@
 	} else if (ins->objectid) {
 		ret = 0;
 	}
+out:
 
 	return ret;
 }
@@ -5798,12 +5837,10 @@
 			 struct btrfs_root *root,
 			 u64 num_bytes, u64 min_alloc_size,
 			 u64 empty_size, u64 hint_byte,
-			 u64 search_end, struct btrfs_key *ins,
-			 u64 data)
+			 struct btrfs_key *ins, u64 data)
 {
 	bool final_tried = false;
 	int ret;
-	u64 search_start = 0;
 
 	data = btrfs_get_alloc_profile(root, data);
 again:
@@ -5811,23 +5848,31 @@
 	 * the only place that sets empty_size is btrfs_realloc_node, which
 	 * is not called recursively on allocations
 	 */
-	if (empty_size || root->ref_cows)
+	if (empty_size || root->ref_cows) {
 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 				     num_bytes + 2 * 1024 * 1024, data,
 				     CHUNK_ALLOC_NO_FORCE);
+		if (ret < 0 && ret != -ENOSPC) {
+			btrfs_abort_transaction(trans, root, ret);
+			return ret;
+		}
+	}
 
 	WARN_ON(num_bytes < root->sectorsize);
 	ret = find_free_extent(trans, root, num_bytes, empty_size,
-			       search_start, search_end, hint_byte,
-			       ins, data);
+			       hint_byte, ins, data);
 
 	if (ret == -ENOSPC) {
 		if (!final_tried) {
 			num_bytes = num_bytes >> 1;
 			num_bytes = num_bytes & ~(root->sectorsize - 1);
 			num_bytes = max(num_bytes, min_alloc_size);
-			do_chunk_alloc(trans, root->fs_info->extent_root,
+			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 				       num_bytes, data, CHUNK_ALLOC_FORCE);
+			if (ret < 0 && ret != -ENOSPC) {
+				btrfs_abort_transaction(trans, root, ret);
+				return ret;
+			}
 			if (num_bytes == min_alloc_size)
 				final_tried = true;
 			goto again;
@@ -5838,7 +5883,8 @@
 			printk(KERN_ERR "btrfs allocation failed flags %llu, "
 			       "wanted %llu\n", (unsigned long long)data,
 			       (unsigned long long)num_bytes);
-			dump_space_info(sinfo, num_bytes, 1);
+			if (sinfo)
+				dump_space_info(sinfo, num_bytes, 1);
 		}
 	}
 
@@ -5917,7 +5963,10 @@
 	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
 				      ins, size);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_free_path(path);
+		return ret;
+	}
 
 	leaf = path->nodes[0];
 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -5947,7 +5996,7 @@
 	btrfs_free_path(path);
 
 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
-	if (ret) {
+	if (ret) { /* -ENOENT, logic error */
 		printk(KERN_ERR "btrfs update block group failed for %llu "
 		       "%llu\n", (unsigned long long)ins->objectid,
 		       (unsigned long long)ins->offset);
@@ -5978,7 +6027,10 @@
 	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
 				      ins, size);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_free_path(path);
+		return ret;
+	}
 
 	leaf = path->nodes[0];
 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -6008,7 +6060,7 @@
 	btrfs_free_path(path);
 
 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
-	if (ret) {
+	if (ret) { /* -ENOENT, logic error */
 		printk(KERN_ERR "btrfs update block group failed for %llu "
 		       "%llu\n", (unsigned long long)ins->objectid,
 		       (unsigned long long)ins->offset);
@@ -6056,28 +6108,28 @@
 	if (!caching_ctl) {
 		BUG_ON(!block_group_cache_done(block_group));
 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	} else {
 		mutex_lock(&caching_ctl->mutex);
 
 		if (start >= caching_ctl->progress) {
 			ret = add_excluded_extent(root, start, num_bytes);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		} else if (start + num_bytes <= caching_ctl->progress) {
 			ret = btrfs_remove_free_space(block_group,
 						      start, num_bytes);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		} else {
 			num_bytes = caching_ctl->progress - start;
 			ret = btrfs_remove_free_space(block_group,
 						      start, num_bytes);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			start = caching_ctl->progress;
 			num_bytes = ins->objectid + ins->offset -
 				    caching_ctl->progress;
 			ret = add_excluded_extent(root, start, num_bytes);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		}
 
 		mutex_unlock(&caching_ctl->mutex);
@@ -6086,7 +6138,7 @@
 
 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
 					  RESERVE_ALLOC_NO_ACCOUNT);
-	BUG_ON(ret);
+	BUG_ON(ret); /* logic error */
 	btrfs_put_block_group(block_group);
 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
 					 0, owner, offset, ins, 1);
@@ -6107,6 +6159,7 @@
 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
 	btrfs_tree_lock(buf);
 	clean_tree_block(trans, root, buf);
+	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
 	btrfs_set_lock_blocking(buf);
 	btrfs_set_buffer_uptodate(buf);
@@ -6214,7 +6267,7 @@
 		return ERR_CAST(block_rsv);
 
 	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
-				   empty_size, hint, (u64)-1, &ins, 0);
+				   empty_size, hint, &ins, 0);
 	if (ret) {
 		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
 		return ERR_PTR(ret);
@@ -6222,7 +6275,7 @@
 
 	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
 				    blocksize, level);
-	BUG_ON(IS_ERR(buf));
+	BUG_ON(IS_ERR(buf)); /* -ENOMEM */
 
 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
 		if (parent == 0)
@@ -6234,7 +6287,7 @@
 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
 		struct btrfs_delayed_extent_op *extent_op;
 		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
-		BUG_ON(!extent_op);
+		BUG_ON(!extent_op); /* -ENOMEM */
 		if (key)
 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
 		else
@@ -6249,7 +6302,7 @@
 					ins.offset, parent, root_objectid,
 					level, BTRFS_ADD_DELAYED_EXTENT,
 					extent_op, for_cow);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 	return buf;
 }
@@ -6319,7 +6372,9 @@
 		/* We don't lock the tree block, it's OK to be racy here */
 		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
 					       &refs, &flags);
-		BUG_ON(ret);
+		/* We don't care about errors in readahead. */
+		if (ret < 0)
+			continue;
 		BUG_ON(refs == 0);
 
 		if (wc->stage == DROP_REFERENCE) {
@@ -6386,7 +6441,9 @@
 					       eb->start, eb->len,
 					       &wc->refs[level],
 					       &wc->flags[level]);
-		BUG_ON(ret);
+		BUG_ON(ret == -ENOMEM);
+		if (ret)
+			return ret;
 		BUG_ON(wc->refs[level] == 0);
 	}
 
@@ -6405,12 +6462,12 @@
 	if (!(wc->flags[level] & flag)) {
 		BUG_ON(!path->locks[level]);
 		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
 						  eb->len, flag, 0);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 		wc->flags[level] |= flag;
 	}
 
@@ -6482,7 +6539,11 @@
 	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
 				       &wc->refs[level - 1],
 				       &wc->flags[level - 1]);
-	BUG_ON(ret);
+	if (ret < 0) {
+		btrfs_tree_unlock(next);
+		return ret;
+	}
+
 	BUG_ON(wc->refs[level - 1] == 0);
 	*lookup_info = 0;
 
@@ -6551,7 +6612,7 @@
 
 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
 				root->root_key.objectid, level - 1, 0, 0);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 	btrfs_tree_unlock(next);
 	free_extent_buffer(next);
@@ -6609,7 +6670,10 @@
 						       eb->start, eb->len,
 						       &wc->refs[level],
 						       &wc->flags[level]);
-			BUG_ON(ret);
+			if (ret < 0) {
+				btrfs_tree_unlock_rw(eb, path->locks[level]);
+				return ret;
+			}
 			BUG_ON(wc->refs[level] == 0);
 			if (wc->refs[level] == 1) {
 				btrfs_tree_unlock_rw(eb, path->locks[level]);
@@ -6629,7 +6693,7 @@
 			else
 				ret = btrfs_dec_ref(trans, root, eb, 0,
 						    wc->for_reloc);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		}
 		/* make block locked assertion in clean_tree_block happy */
 		if (!path->locks[level] &&
@@ -6738,7 +6802,7 @@
  * also make sure backrefs for the shared block and all lower level
  * blocks are properly updated.
  */
-void btrfs_drop_snapshot(struct btrfs_root *root,
+int btrfs_drop_snapshot(struct btrfs_root *root,
 			 struct btrfs_block_rsv *block_rsv, int update_ref,
 			 int for_reloc)
 {
@@ -6766,7 +6830,10 @@
 	}
 
 	trans = btrfs_start_transaction(tree_root, 0);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans)) {
+		err = PTR_ERR(trans);
+		goto out_free;
+	}
 
 	if (block_rsv)
 		trans->block_rsv = block_rsv;
@@ -6791,7 +6858,7 @@
 		path->lowest_level = 0;
 		if (ret < 0) {
 			err = ret;
-			goto out_free;
+			goto out_end_trans;
 		}
 		WARN_ON(ret > 0);
 
@@ -6811,7 +6878,10 @@
 						path->nodes[level]->len,
 						&wc->refs[level],
 						&wc->flags[level]);
-			BUG_ON(ret);
+			if (ret < 0) {
+				err = ret;
+				goto out_end_trans;
+			}
 			BUG_ON(wc->refs[level] == 0);
 
 			if (level == root_item->drop_level)
@@ -6862,26 +6932,40 @@
 			ret = btrfs_update_root(trans, tree_root,
 						&root->root_key,
 						root_item);
-			BUG_ON(ret);
+			if (ret) {
+				btrfs_abort_transaction(trans, tree_root, ret);
+				err = ret;
+				goto out_end_trans;
+			}
 
 			btrfs_end_transaction_throttle(trans, tree_root);
 			trans = btrfs_start_transaction(tree_root, 0);
-			BUG_ON(IS_ERR(trans));
+			if (IS_ERR(trans)) {
+				err = PTR_ERR(trans);
+				goto out_free;
+			}
 			if (block_rsv)
 				trans->block_rsv = block_rsv;
 		}
 	}
 	btrfs_release_path(path);
-	BUG_ON(err);
+	if (err)
+		goto out_end_trans;
 
 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, tree_root, ret);
+		goto out_end_trans;
+	}
 
 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
 		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
 					   NULL, NULL);
-		BUG_ON(ret < 0);
-		if (ret > 0) {
+		if (ret < 0) {
+			btrfs_abort_transaction(trans, tree_root, ret);
+			err = ret;
+			goto out_end_trans;
+		} else if (ret > 0) {
 			/* if we fail to delete the orphan item this time
 			 * around, it'll get picked up the next time.
 			 *
@@ -6899,14 +6983,15 @@
 		free_extent_buffer(root->commit_root);
 		kfree(root);
 	}
-out_free:
+out_end_trans:
 	btrfs_end_transaction_throttle(trans, tree_root);
+out_free:
 	kfree(wc);
 	btrfs_free_path(path);
 out:
 	if (err)
 		btrfs_std_error(root->fs_info, err);
-	return;
+	return err;
 }
 
 /*
@@ -6983,31 +7068,15 @@
 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 {
 	u64 num_devices;
-	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
-		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+	u64 stripped;
 
-	if (root->fs_info->balance_ctl) {
-		struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-		u64 tgt = 0;
-
-		/* pick restriper's target profile and return */
-		if (flags & BTRFS_BLOCK_GROUP_DATA &&
-		    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-		} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
-			   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-		} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
-			   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-		}
-
-		if (tgt) {
-			/* extended -> chunk profile */
-			tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-			return tgt;
-		}
-	}
+	/*
+	 * if restripe for this chunk_type is on pick target profile and
+	 * return, otherwise do the usual balance
+	 */
+	stripped = get_restripe_target(root->fs_info, flags);
+	if (stripped)
+		return extended_to_chunk(stripped);
 
 	/*
 	 * we add in the count of missing devices because we want
@@ -7017,6 +7086,9 @@
 	num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
 
+	stripped = BTRFS_BLOCK_GROUP_RAID0 |
+		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+
 	if (num_devices == 1) {
 		stripped |= BTRFS_BLOCK_GROUP_DUP;
 		stripped = flags & ~stripped;
@@ -7029,7 +7101,6 @@
 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
 			     BTRFS_BLOCK_GROUP_RAID10))
 			return stripped | BTRFS_BLOCK_GROUP_DUP;
-		return flags;
 	} else {
 		/* they already had raid on here, just return */
 		if (flags & stripped)
@@ -7042,9 +7113,9 @@
 		if (flags & BTRFS_BLOCK_GROUP_DUP)
 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
 
-		/* turn single device chunks into raid0 */
-		return stripped | BTRFS_BLOCK_GROUP_RAID0;
+		/* this is drive concat, leave it alone */
 	}
+
 	return flags;
 }
 
@@ -7103,12 +7174,16 @@
 	BUG_ON(cache->ro);
 
 	trans = btrfs_join_transaction(root);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 
 	alloc_flags = update_block_group_flags(root, cache->flags);
-	if (alloc_flags != cache->flags)
-		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
-			       CHUNK_ALLOC_FORCE);
+	if (alloc_flags != cache->flags) {
+		ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+				     CHUNK_ALLOC_FORCE);
+		if (ret < 0)
+			goto out;
+	}
 
 	ret = set_block_group_ro(cache, 0);
 	if (!ret)
@@ -7188,7 +7263,7 @@
 	return free_bytes;
 }
 
-int btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_set_block_group_rw(struct btrfs_root *root,
 			      struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
@@ -7204,7 +7279,6 @@
 	cache->ro = 0;
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
-	return 0;
 }
 
 /*
@@ -7222,6 +7296,7 @@
 	u64 min_free;
 	u64 dev_min = 1;
 	u64 dev_nr = 0;
+	u64 target;
 	int index;
 	int full = 0;
 	int ret = 0;
@@ -7262,13 +7337,11 @@
 	/*
 	 * ok we don't have enough space, but maybe we have free space on our
 	 * devices to allocate new chunks for relocation, so loop through our
-	 * alloc devices and guess if we have enough space.  However, if we
-	 * were marked as full, then we know there aren't enough chunks, and we
-	 * can just return.
+	 * alloc devices and guess if we have enough space.  If this block
+	 * group is going to be restriped, run checks against the target
+	 * profile instead of the current one.
 	 */
 	ret = -1;
-	if (full)
-		goto out;
 
 	/*
 	 * index:
@@ -7278,7 +7351,20 @@
 	 *      3: raid0
 	 *      4: single
 	 */
-	index = get_block_group_index(block_group);
+	target = get_restripe_target(root->fs_info, block_group->flags);
+	if (target) {
+		index = __get_block_group_index(extended_to_chunk(target));
+	} else {
+		/*
+		 * this is just a balance, so if we were marked as full
+		 * we know there is no space for a new chunk
+		 */
+		if (full)
+			goto out;
+
+		index = get_block_group_index(block_group);
+	}
+
 	if (index == 0) {
 		dev_min = 4;
 		/* Divide by 2 */
@@ -7572,7 +7658,7 @@
 		ret = update_space_info(info, cache->flags, found_key.offset,
 					btrfs_block_group_used(&cache->item),
 					&space_info);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 		cache->space_info = space_info;
 		spin_lock(&cache->space_info->lock);
 		cache->space_info->bytes_readonly += cache->bytes_super;
@@ -7581,7 +7667,7 @@
 		__link_block_group(space_info, cache);
 
 		ret = btrfs_add_block_group_cache(root->fs_info, cache);
-		BUG_ON(ret);
+		BUG_ON(ret); /* Logic error */
 
 		set_avail_alloc_bits(root->fs_info, cache->flags);
 		if (btrfs_chunk_readonly(root, cache->key.objectid))
@@ -7663,7 +7749,7 @@
 
 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
 				&cache->space_info);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 	update_global_block_rsv(root->fs_info);
 
 	spin_lock(&cache->space_info->lock);
@@ -7673,11 +7759,14 @@
 	__link_block_group(cache->space_info, cache);
 
 	ret = btrfs_add_block_group_cache(root->fs_info, cache);
-	BUG_ON(ret);
+	BUG_ON(ret); /* Logic error */
 
 	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
 				sizeof(cache->item));
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, extent_root, ret);
+		return ret;
+	}
 
 	set_avail_alloc_bits(extent_root->fs_info, type);
 
@@ -7686,11 +7775,8 @@
 
 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-	u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	/* chunk -> extended profile */
-	if (extra_flags == 0)
-		extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	u64 extra_flags = chunk_to_extended(flags) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits &= ~extra_flags;
@@ -7758,7 +7844,10 @@
 	inode = lookup_free_space_inode(tree_root, block_group, path);
 	if (!IS_ERR(inode)) {
 		ret = btrfs_orphan_add(trans, inode);
-		BUG_ON(ret);
+		if (ret) {
+			btrfs_add_delayed_iput(inode);
+			goto out;
+		}
 		clear_nlink(inode);
 		/* One for the block groups ref */
 		spin_lock(&block_group->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2862454..8d904dd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -19,6 +19,7 @@
 #include "btrfs_inode.h"
 #include "volumes.h"
 #include "check-integrity.h"
+#include "locking.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -53,6 +54,13 @@
 	unsigned int sync_io:1;
 };
 
+static noinline void flush_write_bio(void *data);
+static inline struct btrfs_fs_info *
+tree_fs_info(struct extent_io_tree *tree)
+{
+	return btrfs_sb(tree->mapping->host->i_sb);
+}
+
 int __init extent_io_init(void)
 {
 	extent_state_cache = kmem_cache_create("extent_state",
@@ -136,6 +144,7 @@
 #endif
 	atomic_set(&state->refs, 1);
 	init_waitqueue_head(&state->wq);
+	trace_alloc_extent_state(state, mask, _RET_IP_);
 	return state;
 }
 
@@ -153,6 +162,7 @@
 		list_del(&state->leak_list);
 		spin_unlock_irqrestore(&leak_lock, flags);
 #endif
+		trace_free_extent_state(state, _RET_IP_);
 		kmem_cache_free(extent_state_cache, state);
 	}
 }
@@ -439,6 +449,13 @@
 	return prealloc;
 }
 
+void extent_io_tree_panic(struct extent_io_tree *tree, int err)
+{
+	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
+		    "Extent tree was modified by another "
+		    "thread while locked.");
+}
+
 /*
  * clear some bits on a range in the tree.  This may require splitting
  * or inserting elements in the tree, so the gfp mask is used to
@@ -449,8 +466,7 @@
  *
  * the range [start, end] is inclusive.
  *
- * This takes the tree lock, and returns < 0 on error, > 0 if any of the
- * bits were already set, or zero if none of the bits were already set.
+ * This takes the tree lock, and returns 0 on success and < 0 on error.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		     int bits, int wake, int delete,
@@ -464,7 +480,6 @@
 	struct rb_node *node;
 	u64 last_end;
 	int err;
-	int set = 0;
 	int clear = 0;
 
 	if (delete)
@@ -542,12 +557,14 @@
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, start);
-		BUG_ON(err == -EEXIST);
+		if (err)
+			extent_io_tree_panic(tree, err);
+
 		prealloc = NULL;
 		if (err)
 			goto out;
 		if (state->end <= end) {
-			set |= clear_state_bit(tree, state, &bits, wake);
+			clear_state_bit(tree, state, &bits, wake);
 			if (last_end == (u64)-1)
 				goto out;
 			start = last_end + 1;
@@ -564,17 +581,19 @@
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, end + 1);
-		BUG_ON(err == -EEXIST);
+		if (err)
+			extent_io_tree_panic(tree, err);
+
 		if (wake)
 			wake_up(&state->wq);
 
-		set |= clear_state_bit(tree, prealloc, &bits, wake);
+		clear_state_bit(tree, prealloc, &bits, wake);
 
 		prealloc = NULL;
 		goto out;
 	}
 
-	set |= clear_state_bit(tree, state, &bits, wake);
+	clear_state_bit(tree, state, &bits, wake);
 next:
 	if (last_end == (u64)-1)
 		goto out;
@@ -591,7 +610,7 @@
 	if (prealloc)
 		free_extent_state(prealloc);
 
-	return set;
+	return 0;
 
 search_again:
 	if (start > end)
@@ -602,8 +621,8 @@
 	goto again;
 }
 
-static int wait_on_state(struct extent_io_tree *tree,
-			 struct extent_state *state)
+static void wait_on_state(struct extent_io_tree *tree,
+			  struct extent_state *state)
 		__releases(tree->lock)
 		__acquires(tree->lock)
 {
@@ -613,7 +632,6 @@
 	schedule();
 	spin_lock(&tree->lock);
 	finish_wait(&state->wq, &wait);
-	return 0;
 }
 
 /*
@@ -621,7 +639,7 @@
  * The range [start, end] is inclusive.
  * The tree lock is taken by this function
  */
-int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
+void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
 {
 	struct extent_state *state;
 	struct rb_node *node;
@@ -658,7 +676,6 @@
 	}
 out:
 	spin_unlock(&tree->lock);
-	return 0;
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
@@ -706,9 +723,10 @@
  * [start, end] is inclusive This takes the tree lock.
  */
 
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int exclusive_bits, u64 *failed_start,
-		   struct extent_state **cached_state, gfp_t mask)
+static int __must_check
+__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		 int bits, int exclusive_bits, u64 *failed_start,
+		 struct extent_state **cached_state, gfp_t mask)
 {
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
@@ -742,8 +760,10 @@
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
 		err = insert_state(tree, prealloc, start, end, &bits);
+		if (err)
+			extent_io_tree_panic(tree, err);
+
 		prealloc = NULL;
-		BUG_ON(err == -EEXIST);
 		goto out;
 	}
 	state = rb_entry(node, struct extent_state, rb_node);
@@ -809,7 +829,9 @@
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, start);
-		BUG_ON(err == -EEXIST);
+		if (err)
+			extent_io_tree_panic(tree, err);
+
 		prealloc = NULL;
 		if (err)
 			goto out;
@@ -846,12 +868,9 @@
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
 				   &bits);
-		BUG_ON(err == -EEXIST);
-		if (err) {
-			free_extent_state(prealloc);
-			prealloc = NULL;
-			goto out;
-		}
+		if (err)
+			extent_io_tree_panic(tree, err);
+
 		cache_state(prealloc, cached_state);
 		prealloc = NULL;
 		start = this_end + 1;
@@ -873,7 +892,8 @@
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
 		err = split_state(tree, state, prealloc, end + 1);
-		BUG_ON(err == -EEXIST);
+		if (err)
+			extent_io_tree_panic(tree, err);
 
 		set_state_bits(tree, prealloc, &bits);
 		cache_state(prealloc, cached_state);
@@ -900,6 +920,15 @@
 	goto again;
 }
 
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
+		   u64 *failed_start, struct extent_state **cached_state,
+		   gfp_t mask)
+{
+	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
+				cached_state, mask);
+}
+
+
 /**
  * convert_extent - convert all bits in a given range from one bit to another
  * @tree:	the io tree to search
@@ -946,7 +975,8 @@
 		}
 		err = insert_state(tree, prealloc, start, end, &bits);
 		prealloc = NULL;
-		BUG_ON(err == -EEXIST);
+		if (err)
+			extent_io_tree_panic(tree, err);
 		goto out;
 	}
 	state = rb_entry(node, struct extent_state, rb_node);
@@ -1002,7 +1032,8 @@
 			goto out;
 		}
 		err = split_state(tree, state, prealloc, start);
-		BUG_ON(err == -EEXIST);
+		if (err)
+			extent_io_tree_panic(tree, err);
 		prealloc = NULL;
 		if (err)
 			goto out;
@@ -1041,12 +1072,8 @@
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
 				   &bits);
-		BUG_ON(err == -EEXIST);
-		if (err) {
-			free_extent_state(prealloc);
-			prealloc = NULL;
-			goto out;
-		}
+		if (err)
+			extent_io_tree_panic(tree, err);
 		prealloc = NULL;
 		start = this_end + 1;
 		goto search_again;
@@ -1065,7 +1092,8 @@
 		}
 
 		err = split_state(tree, state, prealloc, end + 1);
-		BUG_ON(err == -EEXIST);
+		if (err)
+			extent_io_tree_panic(tree, err);
 
 		set_state_bits(tree, prealloc, &bits);
 		clear_state_bit(tree, prealloc, &clear_bits, 0);
@@ -1095,14 +1123,14 @@
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		     gfp_t mask)
 {
-	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
+	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
 			      NULL, mask);
 }
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		    int bits, gfp_t mask)
 {
-	return set_extent_bit(tree, start, end, bits, 0, NULL,
+	return set_extent_bit(tree, start, end, bits, NULL,
 			      NULL, mask);
 }
 
@@ -1117,7 +1145,7 @@
 {
 	return set_extent_bit(tree, start, end,
 			      EXTENT_DELALLOC | EXTENT_UPTODATE,
-			      0, NULL, cached_state, mask);
+			      NULL, cached_state, mask);
 }
 
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1131,7 +1159,7 @@
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 		     gfp_t mask)
 {
-	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
+	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
 			      NULL, mask);
 }
 
@@ -1139,7 +1167,7 @@
 			struct extent_state **cached_state, gfp_t mask)
 {
 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
-			      NULL, cached_state, mask);
+			      cached_state, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1155,42 +1183,40 @@
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, struct extent_state **cached_state, gfp_t mask)
+		     int bits, struct extent_state **cached_state)
 {
 	int err;
 	u64 failed_start;
 	while (1) {
-		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
-				     EXTENT_LOCKED, &failed_start,
-				     cached_state, mask);
-		if (err == -EEXIST && (mask & __GFP_WAIT)) {
+		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+				       EXTENT_LOCKED, &failed_start,
+				       cached_state, GFP_NOFS);
+		if (err == -EEXIST) {
 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 			start = failed_start;
-		} else {
+		} else
 			break;
-		}
 		WARN_ON(start > end);
 	}
 	return err;
 }
 
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	return lock_extent_bits(tree, start, end, 0, NULL, mask);
+	return lock_extent_bits(tree, start, end, 0, NULL);
 }
 
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-		    gfp_t mask)
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	int err;
 	u64 failed_start;
 
-	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
-			     &failed_start, NULL, mask);
+	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
+			       &failed_start, NULL, GFP_NOFS);
 	if (err == -EEXIST) {
 		if (failed_start > start)
 			clear_extent_bit(tree, start, failed_start - 1,
-					 EXTENT_LOCKED, 1, 0, NULL, mask);
+					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
 		return 0;
 	}
 	return 1;
@@ -1203,10 +1229,10 @@
 				mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
-				mask);
+				GFP_NOFS);
 }
 
 /*
@@ -1220,7 +1246,7 @@
 
 	while (index <= end_index) {
 		page = find_get_page(tree->mapping, index);
-		BUG_ON(!page);
+		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		set_page_writeback(page);
 		page_cache_release(page);
 		index++;
@@ -1343,9 +1369,9 @@
 	return found;
 }
 
-static noinline int __unlock_for_delalloc(struct inode *inode,
-					  struct page *locked_page,
-					  u64 start, u64 end)
+static noinline void __unlock_for_delalloc(struct inode *inode,
+					   struct page *locked_page,
+					   u64 start, u64 end)
 {
 	int ret;
 	struct page *pages[16];
@@ -1355,7 +1381,7 @@
 	int i;
 
 	if (index == locked_page->index && end_index == index)
-		return 0;
+		return;
 
 	while (nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
@@ -1370,7 +1396,6 @@
 		index += ret;
 		cond_resched();
 	}
-	return 0;
 }
 
 static noinline int lock_delalloc_pages(struct inode *inode,
@@ -1500,11 +1525,10 @@
 			goto out_failed;
 		}
 	}
-	BUG_ON(ret);
+	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
 
 	/* step three, lock the state bits for the whole range */
-	lock_extent_bits(tree, delalloc_start, delalloc_end,
-			 0, &cached_state, GFP_NOFS);
+	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
 
 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -1761,39 +1785,34 @@
  * helper function to set a given page up to date if all the
  * extents in the tree for that page are up to date
  */
-static int check_page_uptodate(struct extent_io_tree *tree,
-			       struct page *page)
+static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
-	return 0;
 }
 
 /*
  * helper function to unlock a page if all the extents in the tree
  * for that page are unlocked
  */
-static int check_page_locked(struct extent_io_tree *tree,
-			     struct page *page)
+static void check_page_locked(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
 		unlock_page(page);
-	return 0;
 }
 
 /*
  * helper function to end page writeback if all the extents
  * in the tree for that page are done with writeback
  */
-static int check_page_writeback(struct extent_io_tree *tree,
-			     struct page *page)
+static void check_page_writeback(struct extent_io_tree *tree,
+				 struct page *page)
 {
 	end_page_writeback(page);
-	return 0;
 }
 
 /*
@@ -1912,6 +1931,26 @@
 	return 0;
 }
 
+int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
+			 int mirror_num)
+{
+	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	u64 start = eb->start;
+	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
+	int ret;
+
+	for (i = 0; i < num_pages; i++) {
+		struct page *p = extent_buffer_page(eb, i);
+		ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
+					start, p, mirror_num);
+		if (ret)
+			break;
+		start += PAGE_CACHE_SIZE;
+	}
+
+	return ret;
+}
+
 /*
  * each time an IO finishes, we do a fast check in the IO failure tree
  * to see if we need to process or clean up an io_failure_record
@@ -2258,6 +2297,7 @@
 	u64 start;
 	u64 end;
 	int whole_page;
+	int failed_mirror;
 	int ret;
 
 	if (err)
@@ -2304,9 +2344,16 @@
 			else
 				clean_io_failure(start, page);
 		}
-		if (!uptodate) {
-			int failed_mirror;
+
+		if (!uptodate)
 			failed_mirror = (int)(unsigned long)bio->bi_bdev;
+
+		if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
+			ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
+			if (!ret && !err &&
+			    test_bit(BIO_UPTODATE, &bio->bi_flags))
+				uptodate = 1;
+		} else if (!uptodate) {
 			/*
 			 * The generic bio_readpage_error handles errors the
 			 * following way: If possible, new read requests are
@@ -2320,7 +2367,6 @@
 			ret = bio_readpage_error(bio, page, start, end,
 							failed_mirror, NULL);
 			if (ret == 0) {
-error_handled:
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
 				if (err)
@@ -2328,16 +2374,9 @@
 				uncache_state(&cached);
 				continue;
 			}
-			if (tree->ops && tree->ops->readpage_io_failed_hook) {
-				ret = tree->ops->readpage_io_failed_hook(
-							bio, page, start, end,
-							failed_mirror, state);
-				if (ret == 0)
-					goto error_handled;
-			}
 		}
 
-		if (uptodate) {
+		if (uptodate && tree->track_uptodate) {
 			set_extent_uptodate(tree, start, end, &cached,
 					    GFP_ATOMIC);
 		}
@@ -2386,8 +2425,12 @@
 	return bio;
 }
 
-static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
-			  unsigned long bio_flags)
+/*
+ * Since writes are async, they will only return -ENOMEM.
+ * Reads can return the full range of I/O error conditions.
+ */
+static int __must_check submit_one_bio(int rw, struct bio *bio,
+				       int mirror_num, unsigned long bio_flags)
 {
 	int ret = 0;
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2413,6 +2456,19 @@
 	return ret;
 }
 
+static int merge_bio(struct extent_io_tree *tree, struct page *page,
+		     unsigned long offset, size_t size, struct bio *bio,
+		     unsigned long bio_flags)
+{
+	int ret = 0;
+	if (tree->ops && tree->ops->merge_bio_hook)
+		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+						bio_flags);
+	BUG_ON(ret < 0);
+	return ret;
+
+}
+
 static int submit_extent_page(int rw, struct extent_io_tree *tree,
 			      struct page *page, sector_t sector,
 			      size_t size, unsigned long offset,
@@ -2441,12 +2497,12 @@
 				sector;
 
 		if (prev_bio_flags != bio_flags || !contig ||
-		    (tree->ops && tree->ops->merge_bio_hook &&
-		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
-					       bio_flags)) ||
+		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
 		    bio_add_page(bio, page, page_size, offset) < page_size) {
 			ret = submit_one_bio(rw, bio, mirror_num,
 					     prev_bio_flags);
+			if (ret < 0)
+				return ret;
 			bio = NULL;
 		} else {
 			return 0;
@@ -2473,6 +2529,17 @@
 	return ret;
 }
 
+void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
+{
+	if (!PagePrivate(page)) {
+		SetPagePrivate(page);
+		page_cache_get(page);
+		set_page_private(page, (unsigned long)eb);
+	} else {
+		WARN_ON(page->private != (unsigned long)eb);
+	}
+}
+
 void set_page_extent_mapped(struct page *page)
 {
 	if (!PagePrivate(page)) {
@@ -2482,16 +2549,11 @@
 	}
 }
 
-static void set_page_extent_head(struct page *page, unsigned long len)
-{
-	WARN_ON(!PagePrivate(page));
-	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
-}
-
 /*
  * basic readpage implementation.  Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
  * handlers)
+ * XXX JDM: This needs looking at to ensure proper page locking
  */
 static int __extent_read_full_page(struct extent_io_tree *tree,
 				   struct page *page,
@@ -2531,11 +2593,11 @@
 
 	end = page_end;
 	while (1) {
-		lock_extent(tree, start, end, GFP_NOFS);
+		lock_extent(tree, start, end);
 		ordered = btrfs_lookup_ordered_extent(inode, start);
 		if (!ordered)
 			break;
-		unlock_extent(tree, start, end, GFP_NOFS);
+		unlock_extent(tree, start, end);
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 	}
@@ -2572,7 +2634,7 @@
 				end - cur + 1, 0);
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
-			unlock_extent(tree, cur, end, GFP_NOFS);
+			unlock_extent(tree, cur, end);
 			break;
 		}
 		extent_offset = cur - em->start;
@@ -2624,7 +2686,7 @@
 		if (test_range_bit(tree, cur, cur_end,
 				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2634,7 +2696,7 @@
 		 */
 		if (block_start == EXTENT_MAP_INLINE) {
 			SetPageError(page);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2654,6 +2716,7 @@
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
 					 this_bio_flag);
+			BUG_ON(ret == -ENOMEM);
 			nr++;
 			*bio_flags = this_bio_flag;
 		}
@@ -2795,7 +2858,11 @@
 						       delalloc_end,
 						       &page_started,
 						       &nr_written);
-			BUG_ON(ret);
+			/* File system has been set read-only */
+			if (ret) {
+				SetPageError(page);
+				goto done;
+			}
 			/*
 			 * delalloc_end is already one less than the total
 			 * length, so we don't subtract one from
@@ -2968,6 +3035,275 @@
 	return 0;
 }
 
+static int eb_wait(void *word)
+{
+	io_schedule();
+	return 0;
+}
+
+static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+{
+	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
+		    TASK_UNINTERRUPTIBLE);
+}
+
+static int lock_extent_buffer_for_io(struct extent_buffer *eb,
+				     struct btrfs_fs_info *fs_info,
+				     struct extent_page_data *epd)
+{
+	unsigned long i, num_pages;
+	int flush = 0;
+	int ret = 0;
+
+	if (!btrfs_try_tree_write_lock(eb)) {
+		flush = 1;
+		flush_write_bio(epd);
+		btrfs_tree_lock(eb);
+	}
+
+	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
+		btrfs_tree_unlock(eb);
+		if (!epd->sync_io)
+			return 0;
+		if (!flush) {
+			flush_write_bio(epd);
+			flush = 1;
+		}
+		while (1) {
+			wait_on_extent_buffer_writeback(eb);
+			btrfs_tree_lock(eb);
+			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
+				break;
+			btrfs_tree_unlock(eb);
+		}
+	}
+
+	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+		spin_lock(&fs_info->delalloc_lock);
+		if (fs_info->dirty_metadata_bytes >= eb->len)
+			fs_info->dirty_metadata_bytes -= eb->len;
+		else
+			WARN_ON(1);
+		spin_unlock(&fs_info->delalloc_lock);
+		ret = 1;
+	}
+
+	btrfs_tree_unlock(eb);
+
+	if (!ret)
+		return ret;
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++) {
+		struct page *p = extent_buffer_page(eb, i);
+
+		if (!trylock_page(p)) {
+			if (!flush) {
+				flush_write_bio(epd);
+				flush = 1;
+			}
+			lock_page(p);
+		}
+	}
+
+	return ret;
+}
+
+static void end_extent_buffer_writeback(struct extent_buffer *eb)
+{
+	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+}
+
+static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
+{
+	int uptodate = err == 0;
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct extent_buffer *eb;
+	int done;
+
+	do {
+		struct page *page = bvec->bv_page;
+
+		bvec--;
+		eb = (struct extent_buffer *)page->private;
+		BUG_ON(!eb);
+		done = atomic_dec_and_test(&eb->io_pages);
+
+		if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+			ClearPageUptodate(page);
+			SetPageError(page);
+		}
+
+		end_page_writeback(page);
+
+		if (!done)
+			continue;
+
+		end_extent_buffer_writeback(eb);
+	} while (bvec >= bio->bi_io_vec);
+
+	bio_put(bio);
+
+}
+
+static int write_one_eb(struct extent_buffer *eb,
+			struct btrfs_fs_info *fs_info,
+			struct writeback_control *wbc,
+			struct extent_page_data *epd)
+{
+	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
+	u64 offset = eb->start;
+	unsigned long i, num_pages;
+	int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
+	int ret;
+
+	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+	num_pages = num_extent_pages(eb->start, eb->len);
+	atomic_set(&eb->io_pages, num_pages);
+	for (i = 0; i < num_pages; i++) {
+		struct page *p = extent_buffer_page(eb, i);
+
+		clear_page_dirty_for_io(p);
+		set_page_writeback(p);
+		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+					 -1, end_bio_extent_buffer_writepage,
+					 0, 0, 0);
+		if (ret) {
+			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+			SetPageError(p);
+			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
+				end_extent_buffer_writeback(eb);
+			ret = -EIO;
+			break;
+		}
+		offset += PAGE_CACHE_SIZE;
+		update_nr_written(p, wbc, 1);
+		unlock_page(p);
+	}
+
+	if (unlikely(ret)) {
+		for (; i < num_pages; i++) {
+			struct page *p = extent_buffer_page(eb, i);
+			unlock_page(p);
+		}
+	}
+
+	return ret;
+}
+
+int btree_write_cache_pages(struct address_space *mapping,
+				   struct writeback_control *wbc)
+{
+	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
+	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
+	struct extent_buffer *eb, *prev_eb = NULL;
+	struct extent_page_data epd = {
+		.bio = NULL,
+		.tree = tree,
+		.extent_locked = 0,
+		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
+	};
+	int ret = 0;
+	int done = 0;
+	int nr_to_write_done = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	pgoff_t index;
+	pgoff_t end;		/* Inclusive */
+	int scanned = 0;
+	int tag;
+
+	pagevec_init(&pvec, 0);
+	if (wbc->range_cyclic) {
+		index = mapping->writeback_index; /* Start from prev offset */
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		scanned = 1;
+	}
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag = PAGECACHE_TAG_TOWRITE;
+	else
+		tag = PAGECACHE_TAG_DIRTY;
+retry:
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag_pages_for_writeback(mapping, index, end);
+	while (!done && !nr_to_write_done && (index <= end) &&
+	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+		unsigned i;
+
+		scanned = 1;
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			if (!PagePrivate(page))
+				continue;
+
+			if (!wbc->range_cyclic && page->index > end) {
+				done = 1;
+				break;
+			}
+
+			eb = (struct extent_buffer *)page->private;
+			if (!eb) {
+				WARN_ON(1);
+				continue;
+			}
+
+			if (eb == prev_eb)
+				continue;
+
+			if (!atomic_inc_not_zero(&eb->refs)) {
+				WARN_ON(1);
+				continue;
+			}
+
+			prev_eb = eb;
+			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
+			if (!ret) {
+				free_extent_buffer(eb);
+				continue;
+			}
+
+			ret = write_one_eb(eb, fs_info, wbc, &epd);
+			if (ret) {
+				done = 1;
+				free_extent_buffer(eb);
+				break;
+			}
+			free_extent_buffer(eb);
+
+			/*
+			 * the filesystem may choose to bump up nr_to_write.
+			 * We have to make sure to honor the new nr_to_write
+			 * at any time
+			 */
+			nr_to_write_done = wbc->nr_to_write <= 0;
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+	if (!scanned && !done) {
+		/*
+		 * We hit the last page and there is more work to be done: wrap
+		 * back to the start of the file
+		 */
+		scanned = 1;
+		index = 0;
+		goto retry;
+	}
+	flush_write_bio(&epd);
+	return ret;
+}
+
 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
@@ -3099,10 +3435,14 @@
 static void flush_epd_write_bio(struct extent_page_data *epd)
 {
 	if (epd->bio) {
+		int rw = WRITE;
+		int ret;
+
 		if (epd->sync_io)
-			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
-		else
-			submit_one_bio(WRITE, epd->bio, 0, 0);
+			rw = WRITE_SYNC;
+
+		ret = submit_one_bio(rw, epd->bio, 0, 0);
+		BUG_ON(ret < 0); /* -ENOMEM */
 		epd->bio = NULL;
 	}
 }
@@ -3219,7 +3559,7 @@
 	}
 	BUG_ON(!list_empty(pages));
 	if (bio)
-		submit_one_bio(READ, bio, 0, bio_flags);
+		return submit_one_bio(READ, bio, 0, bio_flags);
 	return 0;
 }
 
@@ -3240,7 +3580,7 @@
 	if (start > end)
 		return 0;
 
-	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
+	lock_extent_bits(tree, start, end, 0, &cached_state);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -3454,7 +3794,7 @@
 	}
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 
 	em = get_extent_skip_holes(inode, start, last_for_get_extent,
 				   get_extent);
@@ -3548,26 +3888,7 @@
 inline struct page *extent_buffer_page(struct extent_buffer *eb,
 					      unsigned long i)
 {
-	struct page *p;
-	struct address_space *mapping;
-
-	if (i == 0)
-		return eb->first_page;
-	i += eb->start >> PAGE_CACHE_SHIFT;
-	mapping = eb->first_page->mapping;
-	if (!mapping)
-		return NULL;
-
-	/*
-	 * extent_buffer_page is only called after pinning the page
-	 * by increasing the reference count.  So we know the page must
-	 * be in the radix tree.
-	 */
-	rcu_read_lock();
-	p = radix_tree_lookup(&mapping->page_tree, i);
-	rcu_read_unlock();
-
-	return p;
+	return eb->pages[i];
 }
 
 inline unsigned long num_extent_pages(u64 start, u64 len)
@@ -3576,6 +3897,19 @@
 		(start >> PAGE_CACHE_SHIFT);
 }
 
+static void __free_extent_buffer(struct extent_buffer *eb)
+{
+#if LEAK_DEBUG
+	unsigned long flags;
+	spin_lock_irqsave(&leak_lock, flags);
+	list_del(&eb->leak_list);
+	spin_unlock_irqrestore(&leak_lock, flags);
+#endif
+	if (eb->pages && eb->pages != eb->inline_pages)
+		kfree(eb->pages);
+	kmem_cache_free(extent_buffer_cache, eb);
+}
+
 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 						   u64 start,
 						   unsigned long len,
@@ -3591,6 +3925,7 @@
 		return NULL;
 	eb->start = start;
 	eb->len = len;
+	eb->tree = tree;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->write_locks, 0);
 	atomic_set(&eb->read_locks, 0);
@@ -3607,20 +3942,32 @@
 	list_add(&eb->leak_list, &buffers);
 	spin_unlock_irqrestore(&leak_lock, flags);
 #endif
+	spin_lock_init(&eb->refs_lock);
 	atomic_set(&eb->refs, 1);
+	atomic_set(&eb->io_pages, 0);
+
+	if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
+		struct page **pages;
+		int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT;
+		pages = kzalloc(num_pages * sizeof(struct page *), mask);
+		if (!pages) {
+			__free_extent_buffer(eb);
+			return NULL;
+		}
+		eb->pages = pages;
+	} else {
+		eb->pages = eb->inline_pages;
+	}
 
 	return eb;
 }
 
-static void __free_extent_buffer(struct extent_buffer *eb)
+static int extent_buffer_under_io(struct extent_buffer *eb)
 {
-#if LEAK_DEBUG
-	unsigned long flags;
-	spin_lock_irqsave(&leak_lock, flags);
-	list_del(&eb->leak_list);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
-	kmem_cache_free(extent_buffer_cache, eb);
+	return (atomic_read(&eb->io_pages) ||
+		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
+		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 }
 
 /*
@@ -3632,8 +3979,7 @@
 	unsigned long index;
 	struct page *page;
 
-	if (!eb->first_page)
-		return;
+	BUG_ON(extent_buffer_under_io(eb));
 
 	index = num_extent_pages(eb->start, eb->len);
 	if (start_idx >= index)
@@ -3642,8 +3988,34 @@
 	do {
 		index--;
 		page = extent_buffer_page(eb, index);
-		if (page)
+		if (page) {
+			spin_lock(&page->mapping->private_lock);
+			/*
+			 * We do this since we'll remove the pages after we've
+			 * removed the eb from the radix tree, so we could race
+			 * and have this page now attached to the new eb.  So
+			 * only clear page_private if it's still connected to
+			 * this eb.
+			 */
+			if (PagePrivate(page) &&
+			    page->private == (unsigned long)eb) {
+				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+				BUG_ON(PageDirty(page));
+				BUG_ON(PageWriteback(page));
+				/*
+				 * We need to make sure we haven't been attached
+				 * to a new eb.
+				 */
+				ClearPagePrivate(page);
+				set_page_private(page, 0);
+				/* One for the page private */
+				page_cache_release(page);
+			}
+			spin_unlock(&page->mapping->private_lock);
+
+			/* One for when we alloced the page */
 			page_cache_release(page);
+		}
 	} while (index != start_idx);
 }
 
@@ -3656,9 +4028,50 @@
 	__free_extent_buffer(eb);
 }
 
+static void check_buffer_tree_ref(struct extent_buffer *eb)
+{
+	/* the ref bit is tricky.  We have to make sure it is set
+	 * if we have the buffer dirty.   Otherwise the
+	 * code to free a buffer can end up dropping a dirty
+	 * page
+	 *
+	 * Once the ref bit is set, it won't go away while the
+	 * buffer is dirty or in writeback, and it also won't
+	 * go away while we have the reference count on the
+	 * eb bumped.
+	 *
+	 * We can't just set the ref bit without bumping the
+	 * ref on the eb because free_extent_buffer might
+	 * see the ref bit and try to clear it.  If this happens
+	 * free_extent_buffer might end up dropping our original
+	 * ref by mistake and freeing the page before we are able
+	 * to add one more ref.
+	 *
+	 * So bump the ref count first, then set the bit.  If someone
+	 * beat us to it, drop the ref we added.
+	 */
+	if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+		atomic_inc(&eb->refs);
+		if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+			atomic_dec(&eb->refs);
+	}
+}
+
+static void mark_extent_buffer_accessed(struct extent_buffer *eb)
+{
+	unsigned long num_pages, i;
+
+	check_buffer_tree_ref(eb);
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++) {
+		struct page *p = extent_buffer_page(eb, i);
+		mark_page_accessed(p);
+	}
+}
+
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
-					  u64 start, unsigned long len,
-					  struct page *page0)
+					  u64 start, unsigned long len)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
@@ -3674,7 +4087,7 @@
 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
-		mark_page_accessed(eb->first_page);
+		mark_extent_buffer_accessed(eb);
 		return eb;
 	}
 	rcu_read_unlock();
@@ -3683,32 +4096,43 @@
 	if (!eb)
 		return NULL;
 
-	if (page0) {
-		eb->first_page = page0;
-		i = 1;
-		index++;
-		page_cache_get(page0);
-		mark_page_accessed(page0);
-		set_page_extent_mapped(page0);
-		set_page_extent_head(page0, len);
-		uptodate = PageUptodate(page0);
-	} else {
-		i = 0;
-	}
-	for (; i < num_pages; i++, index++) {
+	for (i = 0; i < num_pages; i++, index++) {
 		p = find_or_create_page(mapping, index, GFP_NOFS);
 		if (!p) {
 			WARN_ON(1);
 			goto free_eb;
 		}
-		set_page_extent_mapped(p);
-		mark_page_accessed(p);
-		if (i == 0) {
-			eb->first_page = p;
-			set_page_extent_head(p, len);
-		} else {
-			set_page_private(p, EXTENT_PAGE_PRIVATE);
+
+		spin_lock(&mapping->private_lock);
+		if (PagePrivate(p)) {
+			/*
+			 * We could have already allocated an eb for this page
+			 * and attached one so let's see if we can get a ref on
+			 * the existing eb, and if we can we know it's good and
+			 * we can just return that one, else we know we can just
+			 * overwrite page->private.
+			 */
+			exists = (struct extent_buffer *)p->private;
+			if (atomic_inc_not_zero(&exists->refs)) {
+				spin_unlock(&mapping->private_lock);
+				unlock_page(p);
+				mark_extent_buffer_accessed(exists);
+				goto free_eb;
+			}
+
+			/*
+			 * Do this so attach doesn't complain and we need to
+			 * drop the ref the old guy had.
+			 */
+			ClearPagePrivate(p);
+			WARN_ON(PageDirty(p));
+			page_cache_release(p);
 		}
+		attach_extent_buffer_page(eb, p);
+		spin_unlock(&mapping->private_lock);
+		WARN_ON(PageDirty(p));
+		mark_page_accessed(p);
+		eb->pages[i] = p;
 		if (!PageUptodate(p))
 			uptodate = 0;
 
@@ -3716,12 +4140,10 @@
 		 * see below about how we avoid a nasty race with release page
 		 * and why we unlock later
 		 */
-		if (i != 0)
-			unlock_page(p);
 	}
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-
+again:
 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 	if (ret)
 		goto free_eb;
@@ -3731,14 +4153,21 @@
 	if (ret == -EEXIST) {
 		exists = radix_tree_lookup(&tree->buffer,
 						start >> PAGE_CACHE_SHIFT);
-		/* add one reference for the caller */
-		atomic_inc(&exists->refs);
+		if (!atomic_inc_not_zero(&exists->refs)) {
+			spin_unlock(&tree->buffer_lock);
+			radix_tree_preload_end();
+			exists = NULL;
+			goto again;
+		}
 		spin_unlock(&tree->buffer_lock);
 		radix_tree_preload_end();
+		mark_extent_buffer_accessed(exists);
 		goto free_eb;
 	}
 	/* add one reference for the tree */
-	atomic_inc(&eb->refs);
+	spin_lock(&eb->refs_lock);
+	check_buffer_tree_ref(eb);
+	spin_unlock(&eb->refs_lock);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
 
@@ -3751,15 +4180,20 @@
 	 * after the extent buffer is in the radix tree so
 	 * it doesn't get lost
 	 */
-	set_page_extent_mapped(eb->first_page);
-	set_page_extent_head(eb->first_page, eb->len);
-	if (!page0)
-		unlock_page(eb->first_page);
+	SetPageChecked(eb->pages[0]);
+	for (i = 1; i < num_pages; i++) {
+		p = extent_buffer_page(eb, i);
+		ClearPageChecked(p);
+		unlock_page(p);
+	}
+	unlock_page(eb->pages[0]);
 	return eb;
 
 free_eb:
-	if (eb->first_page && !page0)
-		unlock_page(eb->first_page);
+	for (i = 0; i < num_pages; i++) {
+		if (eb->pages[i])
+			unlock_page(eb->pages[i]);
+	}
 
 	if (!atomic_dec_and_test(&eb->refs))
 		return exists;
@@ -3776,7 +4210,7 @@
 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
-		mark_page_accessed(eb->first_page);
+		mark_extent_buffer_accessed(eb);
 		return eb;
 	}
 	rcu_read_unlock();
@@ -3784,19 +4218,71 @@
 	return NULL;
 }
 
+static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
+{
+	struct extent_buffer *eb =
+			container_of(head, struct extent_buffer, rcu_head);
+
+	__free_extent_buffer(eb);
+}
+
+/* Expects to have eb->eb_lock already held */
+static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
+{
+	WARN_ON(atomic_read(&eb->refs) == 0);
+	if (atomic_dec_and_test(&eb->refs)) {
+		struct extent_io_tree *tree = eb->tree;
+
+		spin_unlock(&eb->refs_lock);
+
+		spin_lock(&tree->buffer_lock);
+		radix_tree_delete(&tree->buffer,
+				  eb->start >> PAGE_CACHE_SHIFT);
+		spin_unlock(&tree->buffer_lock);
+
+		/* Should be safe to release our pages at this point */
+		btrfs_release_extent_buffer_page(eb, 0);
+
+		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
+		return;
+	}
+	spin_unlock(&eb->refs_lock);
+}
+
 void free_extent_buffer(struct extent_buffer *eb)
 {
 	if (!eb)
 		return;
 
-	if (!atomic_dec_and_test(&eb->refs))
-		return;
+	spin_lock(&eb->refs_lock);
+	if (atomic_read(&eb->refs) == 2 &&
+	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
+	    !extent_buffer_under_io(eb) &&
+	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+		atomic_dec(&eb->refs);
 
-	WARN_ON(1);
+	/*
+	 * I know this is terrible, but it's temporary until we stop tracking
+	 * the uptodate bits and such for the extent buffers.
+	 */
+	release_extent_buffer(eb, GFP_ATOMIC);
 }
 
-int clear_extent_buffer_dirty(struct extent_io_tree *tree,
-			      struct extent_buffer *eb)
+void free_extent_buffer_stale(struct extent_buffer *eb)
+{
+	if (!eb)
+		return;
+
+	spin_lock(&eb->refs_lock);
+	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
+
+	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
+	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+		atomic_dec(&eb->refs);
+	release_extent_buffer(eb, GFP_NOFS);
+}
+
+void clear_extent_buffer_dirty(struct extent_buffer *eb)
 {
 	unsigned long i;
 	unsigned long num_pages;
@@ -3812,10 +4298,6 @@
 		lock_page(page);
 		WARN_ON(!PagePrivate(page));
 
-		set_page_extent_mapped(page);
-		if (i == 0)
-			set_page_extent_head(page, eb->len);
-
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
 		if (!PageDirty(page)) {
@@ -3827,24 +4309,29 @@
 		ClearPageError(page);
 		unlock_page(page);
 	}
-	return 0;
+	WARN_ON(atomic_read(&eb->refs) == 0);
 }
 
-int set_extent_buffer_dirty(struct extent_io_tree *tree,
-			     struct extent_buffer *eb)
+int set_extent_buffer_dirty(struct extent_buffer *eb)
 {
 	unsigned long i;
 	unsigned long num_pages;
 	int was_dirty = 0;
 
+	check_buffer_tree_ref(eb);
+
 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+
 	num_pages = num_extent_pages(eb->start, eb->len);
+	WARN_ON(atomic_read(&eb->refs) == 0);
+	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+
 	for (i = 0; i < num_pages; i++)
-		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+		set_page_dirty(extent_buffer_page(eb, i));
 	return was_dirty;
 }
 
-static int __eb_straddles_pages(u64 start, u64 len)
+static int range_straddles_pages(u64 start, u64 len)
 {
 	if (len < PAGE_CACHE_SIZE)
 		return 1;
@@ -3855,25 +4342,14 @@
 	return 0;
 }
 
-static int eb_straddles_pages(struct extent_buffer *eb)
-{
-	return __eb_straddles_pages(eb->start, eb->len);
-}
-
-int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-				struct extent_buffer *eb,
-				struct extent_state **cached_state)
+int clear_extent_buffer_uptodate(struct extent_buffer *eb)
 {
 	unsigned long i;
 	struct page *page;
 	unsigned long num_pages;
 
-	num_pages = num_extent_pages(eb->start, eb->len);
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-
-	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			      cached_state, GFP_NOFS);
-
+	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if (page)
@@ -3882,27 +4358,16 @@
 	return 0;
 }
 
-int set_extent_buffer_uptodate(struct extent_io_tree *tree,
-				struct extent_buffer *eb)
+int set_extent_buffer_uptodate(struct extent_buffer *eb)
 {
 	unsigned long i;
 	struct page *page;
 	unsigned long num_pages;
 
+	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 	num_pages = num_extent_pages(eb->start, eb->len);
-
-	if (eb_straddles_pages(eb)) {
-		set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-				    NULL, GFP_NOFS);
-	}
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
-		    ((i == num_pages - 1) &&
-		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
-			check_page_uptodate(tree, page);
-			continue;
-		}
 		SetPageUptodate(page);
 	}
 	return 0;
@@ -3917,7 +4382,7 @@
 	int uptodate;
 	unsigned long index;
 
-	if (__eb_straddles_pages(start, end - start + 1)) {
+	if (range_straddles_pages(start, end - start + 1)) {
 		ret = test_range_bit(tree, start, end,
 				     EXTENT_UPTODATE, 1, NULL);
 		if (ret)
@@ -3939,35 +4404,9 @@
 	return pg_uptodate;
 }
 
-int extent_buffer_uptodate(struct extent_io_tree *tree,
-			   struct extent_buffer *eb,
-			   struct extent_state *cached_state)
+int extent_buffer_uptodate(struct extent_buffer *eb)
 {
-	int ret = 0;
-	unsigned long num_pages;
-	unsigned long i;
-	struct page *page;
-	int pg_uptodate = 1;
-
-	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
-		return 1;
-
-	if (eb_straddles_pages(eb)) {
-		ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-				   EXTENT_UPTODATE, 1, cached_state);
-		if (ret)
-			return ret;
-	}
-
-	num_pages = num_extent_pages(eb->start, eb->len);
-	for (i = 0; i < num_pages; i++) {
-		page = extent_buffer_page(eb, i);
-		if (!PageUptodate(page)) {
-			pg_uptodate = 0;
-			break;
-		}
-	}
-	return pg_uptodate;
+	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 }
 
 int read_extent_buffer_pages(struct extent_io_tree *tree,
@@ -3981,21 +4420,14 @@
 	int ret = 0;
 	int locked_pages = 0;
 	int all_uptodate = 1;
-	int inc_all_pages = 0;
 	unsigned long num_pages;
+	unsigned long num_reads = 0;
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
 
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
-	if (eb_straddles_pages(eb)) {
-		if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-				   EXTENT_UPTODATE, 1, NULL)) {
-			return 0;
-		}
-	}
-
 	if (start) {
 		WARN_ON(start < eb->start);
 		start_i = (start >> PAGE_CACHE_SHIFT) -
@@ -4014,8 +4446,10 @@
 			lock_page(page);
 		}
 		locked_pages++;
-		if (!PageUptodate(page))
+		if (!PageUptodate(page)) {
+			num_reads++;
 			all_uptodate = 0;
+		}
 	}
 	if (all_uptodate) {
 		if (start_i == 0)
@@ -4023,20 +4457,12 @@
 		goto unlock_exit;
 	}
 
+	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+	eb->failed_mirror = 0;
+	atomic_set(&eb->io_pages, num_reads);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-
-		WARN_ON(!PagePrivate(page));
-
-		set_page_extent_mapped(page);
-		if (i == 0)
-			set_page_extent_head(page, eb->len);
-
-		if (inc_all_pages)
-			page_cache_get(page);
 		if (!PageUptodate(page)) {
-			if (start_i == 0)
-				inc_all_pages = 1;
 			ClearPageError(page);
 			err = __extent_read_full_page(tree, page,
 						      get_extent, &bio,
@@ -4048,8 +4474,11 @@
 		}
 	}
 
-	if (bio)
-		submit_one_bio(READ, bio, mirror_num, bio_flags);
+	if (bio) {
+		err = submit_one_bio(READ, bio, mirror_num, bio_flags);
+		if (err)
+			return err;
+	}
 
 	if (ret || wait != WAIT_COMPLETE)
 		return ret;
@@ -4061,8 +4490,6 @@
 			ret = -EIO;
 	}
 
-	if (!ret)
-		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 	return ret;
 
 unlock_exit:
@@ -4304,15 +4731,20 @@
 {
 	char *dst_kaddr = page_address(dst_page);
 	char *src_kaddr;
+	int must_memmove = 0;
 
 	if (dst_page != src_page) {
 		src_kaddr = page_address(src_page);
 	} else {
 		src_kaddr = dst_kaddr;
-		BUG_ON(areas_overlap(src_off, dst_off, len));
+		if (areas_overlap(src_off, dst_off, len))
+			must_memmove = 1;
 	}
 
-	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
+	if (must_memmove)
+		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
+	else
+		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
 }
 
 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
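
The copy_pages() hunk above only falls back to memmove() when areas_overlap() says the source and destination ranges share bytes within the same page; memcpy() stays on the fast path otherwise, because memcpy() on overlapping buffers is undefined. A small standalone illustration of that choice (plain userspace C with illustrative names, not the kernel helpers):

#include <stdio.h>
#include <string.h>

/* Mirror the must_memmove decision in copy_pages(): use memmove() only
 * when the two ranges actually overlap. */
static void copy_range(char *buf, size_t dst_off, size_t src_off, size_t len)
{
	int overlap = dst_off < src_off + len && src_off < dst_off + len;

	if (overlap)
		memmove(buf + dst_off, buf + src_off, len);
	else
		memcpy(buf + dst_off, buf + src_off, len);
}

int main(void)
{
	char buf[32] = "abcdefgh";

	copy_range(buf, 2, 0, 6);	/* overlapping shift inside one buffer */
	printf("%s\n", buf);		/* prints "ababcdef" */
	return 0;
}
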
@@ -4382,7 +4814,7 @@
 		       "len %lu len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
-	if (!areas_overlap(src_offset, dst_offset, len)) {
+	if (dst_offset < src_offset) {
 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
 		return;
 	}
@@ -4408,47 +4840,48 @@
 	}
 }
 
-static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
+int try_release_extent_buffer(struct page *page, gfp_t mask)
 {
-	struct extent_buffer *eb =
-			container_of(head, struct extent_buffer, rcu_head);
-
-	btrfs_release_extent_buffer(eb);
-}
-
-int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
-{
-	u64 start = page_offset(page);
 	struct extent_buffer *eb;
-	int ret = 1;
-
-	spin_lock(&tree->buffer_lock);
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-	if (!eb) {
-		spin_unlock(&tree->buffer_lock);
-		return ret;
-	}
-
-	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
-		ret = 0;
-		goto out;
-	}
 
 	/*
-	 * set @eb->refs to 0 if it is already 1, and then release the @eb.
-	 * Or go back.
+	 * We need to make sure nobody is attaching this page to an eb right
+	 * now.
 	 */
-	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
-		ret = 0;
-		goto out;
+	spin_lock(&page->mapping->private_lock);
+	if (!PagePrivate(page)) {
+		spin_unlock(&page->mapping->private_lock);
+		return 1;
 	}
 
-	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-out:
-	spin_unlock(&tree->buffer_lock);
+	eb = (struct extent_buffer *)page->private;
+	BUG_ON(!eb);
 
-	/* at this point we can safely release the extent buffer */
-	if (atomic_read(&eb->refs) == 0)
-		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
-	return ret;
+	/*
+	 * This is a little awful but should be ok, we need to make sure that
+	 * the eb doesn't disappear out from under us while we're looking at
+	 * this page.
+	 */
+	spin_lock(&eb->refs_lock);
+	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+		spin_unlock(&eb->refs_lock);
+		spin_unlock(&page->mapping->private_lock);
+		return 0;
+	}
+	spin_unlock(&page->mapping->private_lock);
+
+	if ((mask & GFP_NOFS) == GFP_NOFS)
+		mask = GFP_NOFS;
+
+	/*
+	 * If tree ref isn't set then we know the ref on this eb is a real ref,
+	 * so just return, this page will likely be freed soon anyway.
+	 */
+	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+		spin_unlock(&eb->refs_lock);
+		return 0;
+	}
+	release_extent_buffer(eb, mask);
+
+	return 1;
 }
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index cecc351..faf10eb5 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -35,6 +35,10 @@
 #define EXTENT_BUFFER_DIRTY 2
 #define EXTENT_BUFFER_CORRUPT 3
 #define EXTENT_BUFFER_READAHEAD 4	/* this got triggered by readahead */
+#define EXTENT_BUFFER_TREE_REF 5
+#define EXTENT_BUFFER_STALE 6
+#define EXTENT_BUFFER_WRITEBACK 7
+#define EXTENT_BUFFER_IOERR 8
 
 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -54,6 +58,7 @@
 #define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
 
 struct extent_state;
+struct btrfs_root;
 
 typedef	int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
 				       struct bio *bio, int mirror_num,
@@ -69,9 +74,7 @@
 			      size_t size, struct bio *bio,
 			      unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
-	int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-				       u64 start, u64 end, int failed_mirror,
-				       struct extent_state *state);
+	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
 	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
 					u64 start, u64 end,
 				       struct extent_state *state);
@@ -97,6 +100,7 @@
 	struct radix_tree_root buffer;
 	struct address_space *mapping;
 	u64 dirty_bytes;
+	int track_uptodate;
 	spinlock_t lock;
 	spinlock_t buffer_lock;
 	struct extent_io_ops *ops;
@@ -119,16 +123,21 @@
 	struct list_head leak_list;
 };
 
+#define INLINE_EXTENT_BUFFER_PAGES 16
+#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
 struct extent_buffer {
 	u64 start;
 	unsigned long len;
 	unsigned long map_start;
 	unsigned long map_len;
-	struct page *first_page;
 	unsigned long bflags;
+	struct extent_io_tree *tree;
+	spinlock_t refs_lock;
+	atomic_t refs;
+	atomic_t io_pages;
+	int failed_mirror;
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
-	atomic_t refs;
 	pid_t lock_owner;
 
 	/* count of read lock holders on the extent buffer */
@@ -152,6 +161,9 @@
 	 * to unlock
 	 */
 	wait_queue_head_t read_lock_wq;
+	wait_queue_head_t lock_wq;
+	struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
+	struct page **pages;
 };
 
 static inline void extent_set_compress_type(unsigned long *bio_flags,
@@ -178,18 +190,17 @@
 int try_release_extent_mapping(struct extent_map_tree *map,
 			       struct extent_io_tree *tree, struct page *page,
 			       gfp_t mask);
-int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
+int try_release_extent_buffer(struct page *page, gfp_t mask);
 int try_release_extent_state(struct extent_map_tree *map,
 			     struct extent_io_tree *tree, struct page *page,
 			     gfp_t mask);
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, struct extent_state **cached, gfp_t mask);
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+		     int bits, struct extent_state **cached);
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
 			 struct extent_state **cached, gfp_t mask);
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-		    gfp_t mask);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent, int mirror_num);
 int __init extent_io_init(void);
@@ -210,7 +221,7 @@
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		    int bits, gfp_t mask);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int exclusive_bits, u64 *failed_start,
+		   int bits, u64 *failed_start,
 		   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask);
@@ -240,6 +251,8 @@
 		      struct address_space *mapping,
 		      get_extent_t *get_extent,
 		      struct writeback_control *wbc);
+int btree_write_cache_pages(struct address_space *mapping,
+			    struct writeback_control *wbc);
 int extent_readpages(struct extent_io_tree *tree,
 		     struct address_space *mapping,
 		     struct list_head *pages, unsigned nr_pages,
@@ -251,11 +264,11 @@
 void set_page_extent_mapped(struct page *page);
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
-					  u64 start, unsigned long len,
-					  struct page *page0);
+					  u64 start, unsigned long len);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 					 u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
+void free_extent_buffer_stale(struct extent_buffer *eb);
 #define WAIT_NONE	0
 #define WAIT_COMPLETE	1
 #define WAIT_PAGE_LOCK	2
@@ -287,19 +300,12 @@
 			   unsigned long src_offset, unsigned long len);
 void memset_extent_buffer(struct extent_buffer *eb, char c,
 			  unsigned long start, unsigned long len);
-int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
-int clear_extent_buffer_dirty(struct extent_io_tree *tree,
-			      struct extent_buffer *eb);
-int set_extent_buffer_dirty(struct extent_io_tree *tree,
-			     struct extent_buffer *eb);
-int set_extent_buffer_uptodate(struct extent_io_tree *tree,
-			       struct extent_buffer *eb);
-int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-				struct extent_buffer *eb,
-				struct extent_state **cached_state);
-int extent_buffer_uptodate(struct extent_io_tree *tree,
-			   struct extent_buffer *eb,
-			   struct extent_state *cached_state);
+void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
+void clear_extent_buffer_dirty(struct extent_buffer *eb);
+int set_extent_buffer_dirty(struct extent_buffer *eb);
+int set_extent_buffer_uptodate(struct extent_buffer *eb);
+int clear_extent_buffer_uptodate(struct extent_buffer *eb);
+int extent_buffer_uptodate(struct extent_buffer *eb);
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 		      unsigned long min_len, char **map,
 		      unsigned long *map_start,
@@ -320,4 +326,6 @@
 			u64 length, u64 logical, struct page *page,
 			int mirror_num);
 int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
+int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
+			 int mirror_num);
 #endif
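
The reworked struct extent_buffer above carries a small inline_pages[INLINE_EXTENT_BUFFER_PAGES] array and a pages pointer that either aims at that inline storage or at a separately allocated array when the buffer spans more pages (see __alloc_extent_buffer() and __free_extent_buffer() in the extent_io.c hunks). A minimal userspace sketch of that inline-or-heap pattern, with illustrative names rather than the btrfs structures:

#include <stdio.h>
#include <stdlib.h>

#define INLINE_SLOTS 16

struct buf {
	void *inline_slots[INLINE_SLOTS];
	void **slots;		/* points at inline_slots or a heap array */
	int nr_slots;
};

static int buf_init(struct buf *b, int nr_slots)
{
	b->nr_slots = nr_slots;
	if (nr_slots > INLINE_SLOTS) {
		b->slots = calloc(nr_slots, sizeof(void *));
		if (!b->slots)
			return -1;
	} else {
		b->slots = b->inline_slots;
	}
	return 0;
}

static void buf_free(struct buf *b)
{
	/* Only free the array when it was separately allocated. */
	if (b->slots && b->slots != b->inline_slots)
		free(b->slots);
}

int main(void)
{
	struct buf small, large;

	buf_init(&small, 4);	/* fits the inline array, no allocation */
	buf_init(&large, 64);	/* too big, falls back to calloc() */
	printf("small uses inline storage: %d\n",
	       small.slots == small.inline_slots);
	printf("large uses inline storage: %d\n",
	       large.slots == large.inline_slots);
	buf_free(&small);
	buf_free(&large);
	return 0;
}
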
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 078b4fd..5d158d3 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -25,10 +25,12 @@
 #include "transaction.h"
 #include "print-tree.h"
 
-#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
+#define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
 				   sizeof(struct btrfs_item) * 2) / \
 				  size) - 1))
 
+#define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE))
+
 #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
 				   sizeof(struct btrfs_ordered_sum)) / \
 				   sizeof(struct btrfs_sector_sum) * \
@@ -59,7 +61,7 @@
 				      sizeof(*item));
 	if (ret < 0)
 		goto out;
-	BUG_ON(ret);
+	BUG_ON(ret); /* Can't happen */
 	leaf = path->nodes[0];
 	item = btrfs_item_ptr(leaf, path->slots[0],
 			      struct btrfs_file_extent_item);
@@ -284,6 +286,7 @@
 	struct btrfs_ordered_sum *sums;
 	struct btrfs_sector_sum *sector_sum;
 	struct btrfs_csum_item *item;
+	LIST_HEAD(tmplist);
 	unsigned long offset;
 	int ret;
 	size_t size;
@@ -358,7 +361,10 @@
 					MAX_ORDERED_SUM_BYTES(root));
 			sums = kzalloc(btrfs_ordered_sum_size(root, size),
 					GFP_NOFS);
-			BUG_ON(!sums);
+			if (!sums) {
+				ret = -ENOMEM;
+				goto fail;
+			}
 
 			sector_sum = sums->sums;
 			sums->bytenr = start;
@@ -380,12 +386,19 @@
 				offset += csum_size;
 				sector_sum++;
 			}
-			list_add_tail(&sums->list, list);
+			list_add_tail(&sums->list, &tmplist);
 		}
 		path->slots[0]++;
 	}
 	ret = 0;
 fail:
+	while (ret < 0 && !list_empty(&tmplist)) {
+		sums = list_first_entry(&tmplist, struct btrfs_ordered_sum, list);
+		list_del(&sums->list);
+		kfree(sums);
+	}
+	list_splice_tail(&tmplist, list);
+
 	btrfs_free_path(path);
 	return ret;
 }
@@ -420,7 +433,7 @@
 		offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
 	ordered = btrfs_lookup_ordered_extent(inode, offset);
-	BUG_ON(!ordered);
+	BUG_ON(!ordered); /* Logic error */
 	sums->bytenr = ordered->start;
 
 	while (bio_index < bio->bi_vcnt) {
@@ -439,11 +452,11 @@
 
 			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
 				       GFP_NOFS);
-			BUG_ON(!sums);
+			BUG_ON(!sums); /* -ENOMEM */
 			sector_sum = sums->sums;
 			sums->len = bytes_left;
 			ordered = btrfs_lookup_ordered_extent(inode, offset);
-			BUG_ON(!ordered);
+			BUG_ON(!ordered); /* Logic error */
 			sums->bytenr = ordered->start;
 		}
 
@@ -483,18 +496,17 @@
  * This calls btrfs_truncate_item with the correct args based on the
  * overlap, and fixes up the key as required.
  */
-static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
-				      struct btrfs_root *root,
-				      struct btrfs_path *path,
-				      struct btrfs_key *key,
-				      u64 bytenr, u64 len)
+static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
+				       struct btrfs_root *root,
+				       struct btrfs_path *path,
+				       struct btrfs_key *key,
+				       u64 bytenr, u64 len)
 {
 	struct extent_buffer *leaf;
 	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	u64 csum_end;
 	u64 end_byte = bytenr + len;
 	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
-	int ret;
 
 	leaf = path->nodes[0];
 	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
@@ -510,7 +522,7 @@
 		 */
 		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
 		new_size *= csum_size;
-		ret = btrfs_truncate_item(trans, root, path, new_size, 1);
+		btrfs_truncate_item(trans, root, path, new_size, 1);
 	} else if (key->offset >= bytenr && csum_end > end_byte &&
 		   end_byte > key->offset) {
 		/*
@@ -522,15 +534,13 @@
 		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
 		new_size *= csum_size;
 
-		ret = btrfs_truncate_item(trans, root, path, new_size, 0);
+		btrfs_truncate_item(trans, root, path, new_size, 0);
 
 		key->offset = end_byte;
-		ret = btrfs_set_item_key_safe(trans, root, path, key);
-		BUG_ON(ret);
+		btrfs_set_item_key_safe(trans, root, path, key);
 	} else {
 		BUG();
 	}
-	return 0;
 }
 
 /*
@@ -635,13 +645,14 @@
 			 * item changed size or key
 			 */
 			ret = btrfs_split_item(trans, root, path, &key, offset);
-			BUG_ON(ret && ret != -EAGAIN);
+			if (ret && ret != -EAGAIN) {
+				btrfs_abort_transaction(trans, root, ret);
+				goto out;
+			}
 
 			key.offset = end_byte - 1;
 		} else {
-			ret = truncate_one_csum(trans, root, path,
-						&key, bytenr, len);
-			BUG_ON(ret);
+			truncate_one_csum(trans, root, path, &key, bytenr, len);
 			if (key.offset < bytenr)
 				break;
 		}
@@ -772,7 +783,7 @@
 		if (diff != csum_size)
 			goto insert;
 
-		ret = btrfs_extend_item(trans, root, path, diff);
+		btrfs_extend_item(trans, root, path, diff);
 		goto csum;
 	}
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e8d06b6..d83260d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -452,7 +452,7 @@
 			split = alloc_extent_map();
 		if (!split2)
 			split2 = alloc_extent_map();
-		BUG_ON(!split || !split2);
+		BUG_ON(!split || !split2); /* -ENOMEM */
 
 		write_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, len);
@@ -494,7 +494,7 @@
 			split->flags = flags;
 			split->compress_type = em->compress_type;
 			ret = add_extent_mapping(em_tree, split);
-			BUG_ON(ret);
+			BUG_ON(ret); /* Logic error */
 			free_extent_map(split);
 			split = split2;
 			split2 = NULL;
@@ -520,7 +520,7 @@
 			}
 
 			ret = add_extent_mapping(em_tree, split);
-			BUG_ON(ret);
+			BUG_ON(ret); /* Logic error */
 			free_extent_map(split);
 			split = NULL;
 		}
@@ -679,7 +679,7 @@
 						root->root_key.objectid,
 						new_key.objectid,
 						start - extent_offset, 0);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 				*hint_byte = disk_bytenr;
 			}
 			key.offset = start;
@@ -754,7 +754,7 @@
 						root->root_key.objectid,
 						key.objectid, key.offset -
 						extent_offset, 0);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 				inode_sub_bytes(inode,
 						extent_end - key.offset);
 				*hint_byte = disk_bytenr;
@@ -770,7 +770,10 @@
 
 			ret = btrfs_del_items(trans, root, path, del_slot,
 					      del_nr);
-			BUG_ON(ret);
+			if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+				goto out;
+			}
 
 			del_nr = 0;
 			del_slot = 0;
@@ -782,11 +785,13 @@
 		BUG_ON(1);
 	}
 
-	if (del_nr > 0) {
+	if (!ret && del_nr > 0) {
 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
-		BUG_ON(ret);
+		if (ret)
+			btrfs_abort_transaction(trans, root, ret);
 	}
 
+out:
 	btrfs_free_path(path);
 	return ret;
 }
@@ -944,7 +949,10 @@
 			btrfs_release_path(path);
 			goto again;
 		}
-		BUG_ON(ret < 0);
+		if (ret < 0) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto out;
+		}
 
 		leaf = path->nodes[0];
 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
@@ -963,7 +971,7 @@
 		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 					   root->root_key.objectid,
 					   ino, orig_offset, 0);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 
 		if (split == start) {
 			key.offset = start;
@@ -990,7 +998,7 @@
 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 					0, root->root_key.objectid,
 					ino, orig_offset, 0);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 	other_start = 0;
 	other_end = start;
@@ -1007,7 +1015,7 @@
 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 					0, root->root_key.objectid,
 					ino, orig_offset, 0);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 	if (del_nr == 0) {
 		fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -1025,7 +1033,10 @@
 		btrfs_mark_buffer_dirty(leaf);
 
 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
-		BUG_ON(ret);
+		if (ret < 0) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto out;
+		}
 	}
 out:
 	btrfs_free_path(path);
@@ -1105,8 +1116,7 @@
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
 		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 start_pos, last_pos - 1, 0, &cached_state,
-				 GFP_NOFS);
+				 start_pos, last_pos - 1, 0, &cached_state);
 		ordered = btrfs_lookup_first_ordered_extent(inode,
 							    last_pos - 1);
 		if (ordered &&
@@ -1638,7 +1648,7 @@
 		 * transaction
 		 */
 		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
-				 locked_end, 0, &cached_state, GFP_NOFS);
+				 locked_end, 0, &cached_state);
 		ordered = btrfs_lookup_first_ordered_extent(inode,
 							    alloc_end - 1);
 		if (ordered &&
@@ -1667,7 +1677,13 @@
 
 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 				      alloc_end - cur_offset, 0);
-		BUG_ON(IS_ERR_OR_NULL(em));
+		if (IS_ERR_OR_NULL(em)) {
+			if (!em)
+				ret = -ENOMEM;
+			else
+				ret = PTR_ERR(em);
+			break;
+		}
 		last_byte = min(extent_map_end(em), alloc_end);
 		actual_end = min_t(u64, extent_map_end(em), offset + len);
 		last_byte = (last_byte + mask) & ~mask;
@@ -1737,7 +1753,7 @@
 		return -ENXIO;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 
 	/*
 	 * Delalloc is such a pain.  If we have a hole and we have pending
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index b02e379..e88330d3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -230,11 +230,13 @@
 
 	if (ret) {
 		trans->block_rsv = rsv;
-		WARN_ON(1);
+		btrfs_abort_transaction(trans, root, ret);
 		return ret;
 	}
 
 	ret = btrfs_update_inode(trans, root, inode);
+	if (ret)
+		btrfs_abort_transaction(trans, root, ret);
 	trans->block_rsv = rsv;
 
 	return ret;
@@ -869,7 +871,7 @@
 	io_ctl_prepare_pages(&io_ctl, inode, 0);
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-			 0, &cached_state, GFP_NOFS);
+			 0, &cached_state);
 
 	node = rb_first(&ctl->free_space_offset);
 	if (!node && cluster) {
@@ -1948,14 +1950,14 @@
 		 */
 		ret = btrfs_add_free_space(block_group, old_start,
 					   offset - old_start);
-		WARN_ON(ret);
+		WARN_ON(ret); /* -ENOMEM */
 		goto out;
 	}
 
 	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
 	if (ret == -EAGAIN)
 		goto again;
-	BUG_ON(ret);
+	BUG_ON(ret); /* logic error */
 out_lock:
 	spin_unlock(&ctl->tree_lock);
 out:
@@ -2346,7 +2348,7 @@
 	rb_erase(&entry->offset_index, &ctl->free_space_offset);
 	ret = tree_insert_offset(&cluster->root, entry->offset,
 				 &entry->offset_index, 1);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -EEXIST; Logic error */
 
 	trace_btrfs_setup_cluster(block_group, cluster,
 				  total_found * block_group->sectorsize, 1);
@@ -2439,7 +2441,7 @@
 		ret = tree_insert_offset(&cluster->root, entry->offset,
 					 &entry->offset_index, 0);
 		total_size += entry->bytes;
-		BUG_ON(ret);
+		BUG_ON(ret); /* -EEXIST; Logic error */
 	} while (node && entry != last);
 
 	cluster->max_size = max_extent;
@@ -2830,6 +2832,7 @@
 		int ret;
 
 		ret = search_bitmap(ctl, entry, &offset, &count);
+		/* Logic error; Should be empty if it can't find anything */
 		BUG_ON(ret);
 
 		ino = offset;
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index baa74f3..a13cf1a 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -19,6 +19,7 @@
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
+#include "print-tree.h"
 
 static int find_name_in_backref(struct btrfs_path *path, const char *name,
 			 int name_len, struct btrfs_inode_ref **ref_ret)
@@ -128,13 +129,14 @@
 	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 			      item_size - (ptr + sub_item_len - item_start));
-	ret = btrfs_truncate_item(trans, root, path,
+	btrfs_truncate_item(trans, root, path,
 				  item_size - sub_item_len, 1);
 out:
 	btrfs_free_path(path);
 	return ret;
 }
 
+/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   const char *name, int name_len,
@@ -165,7 +167,7 @@
 			goto out;
 
 		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
-		ret = btrfs_extend_item(trans, root, path, ins_len);
+		btrfs_extend_item(trans, root, path, ins_len);
 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				     struct btrfs_inode_ref);
 		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index ee15d88..b1a1c92 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -178,7 +178,7 @@
 
 	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
 			  root->root_key.objectid);
-	BUG_ON(IS_ERR(tsk));
+	BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
 }
 
 int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -271,7 +271,7 @@
 			break;
 
 		info = rb_entry(n, struct btrfs_free_space, offset_index);
-		BUG_ON(info->bitmap);
+		BUG_ON(info->bitmap); /* Logic error */
 
 		if (info->offset > root->cache_progress)
 			goto free;
@@ -439,17 +439,16 @@
 	if (ret)
 		goto out;
 	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
-				      (u64)(unsigned long)trans,
-				      trans->bytes_reserved, 1);
+				      trans->transid, trans->bytes_reserved, 1);
 again:
 	inode = lookup_free_ino_inode(root, path);
-	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
 		ret = PTR_ERR(inode);
 		goto out_release;
 	}
 
 	if (IS_ERR(inode)) {
-		BUG_ON(retry);
+		BUG_ON(retry); /* Logic error */
 		retry = true;
 
 		ret = create_free_ino_inode(root, trans, path);
@@ -460,12 +459,17 @@
 
 	BTRFS_I(inode)->generation = 0;
 	ret = btrfs_update_inode(trans, root, inode);
-	WARN_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto out_put;
+	}
 
 	if (i_size_read(inode) > 0) {
 		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
-		if (ret)
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
 			goto out_put;
+		}
 	}
 
 	spin_lock(&root->cache_lock);
@@ -502,8 +506,7 @@
 	iput(inode);
 out_release:
 	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
-				      (u64)(unsigned long)trans,
-				      trans->bytes_reserved, 0);
+				      trans->transid, trans->bytes_reserved, 0);
 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 out:
 	trans->block_rsv = rsv;
@@ -532,7 +535,7 @@
 	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
 	if (ret < 0)
 		goto error;
-	BUG_ON(ret == 0);
+	BUG_ON(ret == 0); /* Corruption */
 	if (path->slots[0] > 0) {
 		slot = path->slots[0] - 1;
 		l = path->nodes[0];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3a0b5c1..115bc05 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -150,7 +150,6 @@
 	inode_add_bytes(inode, size);
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
 				      datasize);
-	BUG_ON(ret);
 	if (ret) {
 		err = ret;
 		goto fail;
@@ -206,9 +205,9 @@
 	 * could end up racing with unlink.
 	 */
 	BTRFS_I(inode)->disk_i_size = inode->i_size;
-	btrfs_update_inode(trans, root, inode);
+	ret = btrfs_update_inode(trans, root, inode);
 
-	return 0;
+	return ret;
 fail:
 	btrfs_free_path(path);
 	return err;
@@ -250,14 +249,18 @@
 
 	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
 				 &hint_byte, 1);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
 	if (isize > actual_end)
 		inline_len = min_t(u64, isize, actual_end);
 	ret = insert_inline_extent(trans, root, inode, start,
 				   inline_len, compressed_size,
 				   compress_type, compressed_pages);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
 	return 0;
@@ -293,7 +296,7 @@
 	struct async_extent *async_extent;
 
 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
-	BUG_ON(!async_extent);
+	BUG_ON(!async_extent); /* -ENOMEM */
 	async_extent->start = start;
 	async_extent->ram_size = ram_size;
 	async_extent->compressed_size = compressed_size;
@@ -344,8 +347,9 @@
 	int will_compress;
 	int compress_type = root->fs_info->compress_type;
 
-	/* if this is a small write inside eof, kick off a defragbot */
-	if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
+	/* if this is a small write inside eof, kick off a defrag */
+	if ((end - start + 1) < 16 * 1024 &&
+	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
 		btrfs_add_inode_defrag(NULL, inode);
 
 	actual_end = min_t(u64, isize, end + 1);
@@ -433,7 +437,11 @@
 cont:
 	if (start == 0) {
 		trans = btrfs_join_transaction(root);
-		BUG_ON(IS_ERR(trans));
+		if (IS_ERR(trans)) {
+			ret = PTR_ERR(trans);
+			trans = NULL;
+			goto cleanup_and_out;
+		}
 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
 		/* lets try to make an inline extent */
@@ -450,11 +458,11 @@
 						    total_compressed,
 						    compress_type, pages);
 		}
-		if (ret == 0) {
+		if (ret <= 0) {
 			/*
-			 * inline extent creation worked, we don't need
-			 * to create any more async work items.  Unlock
-			 * and free up our temp pages.
+			 * inline extent creation worked or returned error,
+			 * we don't need to create any more async work items.
+			 * Unlock and free up our temp pages.
 			 */
 			extent_clear_unlock_delalloc(inode,
 			     &BTRFS_I(inode)->io_tree,
@@ -547,7 +555,7 @@
 	}
 
 out:
-	return 0;
+	return ret;
 
 free_pages_out:
 	for (i = 0; i < nr_pages_ret; i++) {
@@ -557,6 +565,20 @@
 	kfree(pages);
 
 	goto out;
+
+cleanup_and_out:
+	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+				     start, end, NULL,
+				     EXTENT_CLEAR_UNLOCK_PAGE |
+				     EXTENT_CLEAR_DIRTY |
+				     EXTENT_CLEAR_DELALLOC |
+				     EXTENT_SET_WRITEBACK |
+				     EXTENT_END_WRITEBACK);
+	if (!trans || IS_ERR(trans))
+		btrfs_error(root->fs_info, ret, "Failed to join transaction");
+	else
+		btrfs_abort_transaction(trans, root, ret);
+	goto free_pages_out;
 }
 
 /*
@@ -597,7 +619,7 @@
 
 			lock_extent(io_tree, async_extent->start,
 					 async_extent->start +
-					 async_extent->ram_size - 1, GFP_NOFS);
+					 async_extent->ram_size - 1);
 
 			/* allocate blocks */
 			ret = cow_file_range(inode, async_cow->locked_page,
@@ -606,6 +628,8 @@
 					     async_extent->ram_size - 1,
 					     &page_started, &nr_written, 0);
 
+			/* JDM XXX */
+
 			/*
 			 * if page_started, cow_file_range inserted an
 			 * inline extent and took care of all the unlocking
@@ -625,18 +649,21 @@
 		}
 
 		lock_extent(io_tree, async_extent->start,
-			    async_extent->start + async_extent->ram_size - 1,
-			    GFP_NOFS);
+			    async_extent->start + async_extent->ram_size - 1);
 
 		trans = btrfs_join_transaction(root);
-		BUG_ON(IS_ERR(trans));
-		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
-		ret = btrfs_reserve_extent(trans, root,
+		if (IS_ERR(trans)) {
+			ret = PTR_ERR(trans);
+		} else {
+			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+			ret = btrfs_reserve_extent(trans, root,
 					   async_extent->compressed_size,
 					   async_extent->compressed_size,
-					   0, alloc_hint,
-					   (u64)-1, &ins, 1);
-		btrfs_end_transaction(trans, root);
+					   0, alloc_hint, &ins, 1);
+			if (ret)
+				btrfs_abort_transaction(trans, root, ret);
+			btrfs_end_transaction(trans, root);
+		}
 
 		if (ret) {
 			int i;
@@ -649,8 +676,10 @@
 			async_extent->pages = NULL;
 			unlock_extent(io_tree, async_extent->start,
 				      async_extent->start +
-				      async_extent->ram_size - 1, GFP_NOFS);
-			goto retry;
+				      async_extent->ram_size - 1);
+			if (ret == -ENOSPC)
+				goto retry;
+			goto out_free; /* JDM: Requeue? */
 		}
 
 		/*
@@ -662,7 +691,7 @@
 					async_extent->ram_size - 1, 0);
 
 		em = alloc_extent_map();
-		BUG_ON(!em);
+		BUG_ON(!em); /* -ENOMEM */
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
 		em->orig_start = em->start;
@@ -694,7 +723,7 @@
 						ins.offset,
 						BTRFS_ORDERED_COMPRESSED,
 						async_extent->compress_type);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 
 		/*
 		 * clear dirty, set writeback and unlock the pages.
@@ -716,13 +745,17 @@
 				    ins.offset, async_extent->pages,
 				    async_extent->nr_pages);
 
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 		alloc_hint = ins.objectid + ins.offset;
 		kfree(async_extent);
 		cond_resched();
 	}
-
-	return 0;
+	ret = 0;
+out:
+	return ret;
+out_free:
+	kfree(async_extent);
+	goto out;
 }
 
 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
@@ -791,7 +824,18 @@
 
 	BUG_ON(btrfs_is_free_space_inode(root, inode));
 	trans = btrfs_join_transaction(root);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans)) {
+		extent_clear_unlock_delalloc(inode,
+			     &BTRFS_I(inode)->io_tree,
+			     start, end, NULL,
+			     EXTENT_CLEAR_UNLOCK_PAGE |
+			     EXTENT_CLEAR_UNLOCK |
+			     EXTENT_CLEAR_DELALLOC |
+			     EXTENT_CLEAR_DIRTY |
+			     EXTENT_SET_WRITEBACK |
+			     EXTENT_END_WRITEBACK);
+		return PTR_ERR(trans);
+	}
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -800,7 +844,8 @@
 	ret = 0;
 
 	/* if this is a small write inside eof, kick off defrag */
-	if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
+	if (num_bytes < 64 * 1024 &&
+	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
 		btrfs_add_inode_defrag(trans, inode);
 
 	if (start == 0) {
@@ -821,8 +866,10 @@
 			*nr_written = *nr_written +
 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
 			*page_started = 1;
-			ret = 0;
 			goto out;
+		} else if (ret < 0) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto out_unlock;
 		}
 	}
 
@@ -838,11 +885,14 @@
 		cur_alloc_size = disk_num_bytes;
 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
 					   root->sectorsize, 0, alloc_hint,
-					   (u64)-1, &ins, 1);
-		BUG_ON(ret);
+					   &ins, 1);
+		if (ret < 0) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto out_unlock;
+		}
 
 		em = alloc_extent_map();
-		BUG_ON(!em);
+		BUG_ON(!em); /* -ENOMEM */
 		em->start = start;
 		em->orig_start = em->start;
 		ram_size = ins.offset;
@@ -868,13 +918,16 @@
 		cur_alloc_size = ins.offset;
 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 					       ram_size, cur_alloc_size, 0);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 
 		if (root->root_key.objectid ==
 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 			ret = btrfs_reloc_clone_csums(inode, start,
 						      cur_alloc_size);
-			BUG_ON(ret);
+			if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+				goto out_unlock;
+			}
 		}
 
 		if (disk_num_bytes < cur_alloc_size)
@@ -899,11 +952,23 @@
 		alloc_hint = ins.objectid + ins.offset;
 		start += cur_alloc_size;
 	}
-out:
 	ret = 0;
+out:
 	btrfs_end_transaction(trans, root);
 
 	return ret;
+out_unlock:
+	extent_clear_unlock_delalloc(inode,
+		     &BTRFS_I(inode)->io_tree,
+		     start, end, NULL,
+		     EXTENT_CLEAR_UNLOCK_PAGE |
+		     EXTENT_CLEAR_UNLOCK |
+		     EXTENT_CLEAR_DELALLOC |
+		     EXTENT_CLEAR_DIRTY |
+		     EXTENT_SET_WRITEBACK |
+		     EXTENT_END_WRITEBACK);
+
+	goto out;
 }
 
 /*
@@ -969,7 +1034,7 @@
 			 1, 0, NULL, GFP_NOFS);
 	while (start < end) {
 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
-		BUG_ON(!async_cow);
+		BUG_ON(!async_cow); /* -ENOMEM */
 		async_cow->inode = inode;
 		async_cow->root = root;
 		async_cow->locked_page = locked_page;
@@ -1060,7 +1125,7 @@
 	u64 disk_bytenr;
 	u64 num_bytes;
 	int extent_type;
-	int ret;
+	int ret, err;
 	int type;
 	int nocow;
 	int check_prev = 1;
@@ -1078,7 +1143,11 @@
 	else
 		trans = btrfs_join_transaction(root);
 
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans)) {
+		btrfs_free_path(path);
+		return PTR_ERR(trans);
+	}
+
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
 	cow_start = (u64)-1;
@@ -1086,7 +1155,10 @@
 	while (1) {
 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
 					       cur_offset, 0);
-		BUG_ON(ret < 0);
+		if (ret < 0) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto error;
+		}
 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
 			leaf = path->nodes[0];
 			btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1100,8 +1172,10 @@
 		leaf = path->nodes[0];
 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 			ret = btrfs_next_leaf(root, path);
-			if (ret < 0)
-				BUG_ON(1);
+			if (ret < 0) {
+				btrfs_abort_transaction(trans, root, ret);
+				goto error;
+			}
 			if (ret > 0)
 				break;
 			leaf = path->nodes[0];
@@ -1189,7 +1263,10 @@
 			ret = cow_file_range(inode, locked_page, cow_start,
 					found_key.offset - 1, page_started,
 					nr_written, 1);
-			BUG_ON(ret);
+			if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+				goto error;
+			}
 			cow_start = (u64)-1;
 		}
 
@@ -1198,7 +1275,7 @@
 			struct extent_map_tree *em_tree;
 			em_tree = &BTRFS_I(inode)->extent_tree;
 			em = alloc_extent_map();
-			BUG_ON(!em);
+			BUG_ON(!em); /* -ENOMEM */
 			em->start = cur_offset;
 			em->orig_start = em->start;
 			em->len = num_bytes;
@@ -1224,13 +1301,16 @@
 
 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
 					       num_bytes, num_bytes, type);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 
 		if (root->root_key.objectid ==
 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
 						      num_bytes);
-			BUG_ON(ret);
+			if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+				goto error;
+			}
 		}
 
 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
@@ -1249,18 +1329,23 @@
 	if (cow_start != (u64)-1) {
 		ret = cow_file_range(inode, locked_page, cow_start, end,
 				     page_started, nr_written, 1);
-		BUG_ON(ret);
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto error;
+		}
 	}
 
+error:
 	if (nolock) {
-		ret = btrfs_end_transaction_nolock(trans, root);
-		BUG_ON(ret);
+		err = btrfs_end_transaction_nolock(trans, root);
 	} else {
-		ret = btrfs_end_transaction(trans, root);
-		BUG_ON(ret);
+		err = btrfs_end_transaction(trans, root);
 	}
+	if (!ret)
+		ret = err;
+
 	btrfs_free_path(path);
-	return 0;
+	return ret;
 }
 
 /*
@@ -1425,10 +1510,11 @@
 	map_length = length;
 	ret = btrfs_map_block(map_tree, READ, logical,
 			      &map_length, NULL, 0);
-
+	/* Will always return 0 or 1 with map_multi == NULL */
+	BUG_ON(ret < 0);
 	if (map_length < length + size)
 		return 1;
-	return ret;
+	return 0;
 }
 
 /*
@@ -1448,7 +1534,7 @@
 	int ret = 0;
 
 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 	return 0;
 }
 
@@ -1479,14 +1565,16 @@
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret = 0;
 	int skip_sum;
+	int metadata = 0;
 
 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
 	if (btrfs_is_free_space_inode(root, inode))
-		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
-	else
-		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	BUG_ON(ret);
+		metadata = 2;
+
+	ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+	if (ret)
+		return ret;
 
 	if (!(rw & REQ_WRITE)) {
 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
@@ -1571,7 +1659,7 @@
 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 
 	/* already ordered? We're done */
 	if (PagePrivate2(page))
@@ -1675,13 +1763,15 @@
 	 */
 	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
 				 &hint, 0);
-	BUG_ON(ret);
+	if (ret)
+		goto out;
 
 	ins.objectid = btrfs_ino(inode);
 	ins.offset = file_pos;
 	ins.type = BTRFS_EXTENT_DATA_KEY;
 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
-	BUG_ON(ret);
+	if (ret)
+		goto out;
 	leaf = path->nodes[0];
 	fi = btrfs_item_ptr(leaf, path->slots[0],
 			    struct btrfs_file_extent_item);
@@ -1709,10 +1799,10 @@
 	ret = btrfs_alloc_reserved_file_extent(trans, root,
 					root->root_key.objectid,
 					btrfs_ino(inode), file_pos, &ins);
-	BUG_ON(ret);
+out:
 	btrfs_free_path(path);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -1740,35 +1830,41 @@
 					     end - start + 1);
 	if (!ret)
 		return 0;
-	BUG_ON(!ordered_extent);
+	BUG_ON(!ordered_extent); /* Logic error */
 
 	nolock = btrfs_is_free_space_inode(root, inode);
 
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
-		BUG_ON(!list_empty(&ordered_extent->list));
+		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
 		if (!ret) {
 			if (nolock)
 				trans = btrfs_join_transaction_nolock(root);
 			else
 				trans = btrfs_join_transaction(root);
-			BUG_ON(IS_ERR(trans));
+			if (IS_ERR(trans))
+				return PTR_ERR(trans);
 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 			ret = btrfs_update_inode_fallback(trans, root, inode);
-			BUG_ON(ret);
+			if (ret) /* -ENOMEM or corruption */
+				btrfs_abort_transaction(trans, root, ret);
 		}
 		goto out;
 	}
 
 	lock_extent_bits(io_tree, ordered_extent->file_offset,
 			 ordered_extent->file_offset + ordered_extent->len - 1,
-			 0, &cached_state, GFP_NOFS);
+			 0, &cached_state);
 
 	if (nolock)
 		trans = btrfs_join_transaction_nolock(root);
 	else
 		trans = btrfs_join_transaction(root);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		trans = NULL;
+		goto out_unlock;
+	}
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -1779,7 +1875,6 @@
 						ordered_extent->file_offset,
 						ordered_extent->file_offset +
 						ordered_extent->len);
-		BUG_ON(ret);
 	} else {
 		BUG_ON(root == root->fs_info->tree_root);
 		ret = insert_reserved_file_extent(trans, inode,
@@ -1793,11 +1888,14 @@
 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
 				   ordered_extent->file_offset,
 				   ordered_extent->len);
-		BUG_ON(ret);
 	}
 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
 			     ordered_extent->file_offset +
 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
+	if (ret < 0) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto out;
+	}
 
 	add_pending_csums(trans, inode, ordered_extent->file_offset,
 			  &ordered_extent->list);
@@ -1805,7 +1903,10 @@
 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
 		ret = btrfs_update_inode_fallback(trans, root, inode);
-		BUG_ON(ret);
+		if (ret) { /* -ENOMEM or corruption */
+			btrfs_abort_transaction(trans, root, ret);
+			goto out;
+		}
 	}
 	ret = 0;
 out:
@@ -1824,6 +1925,11 @@
 	btrfs_put_ordered_extent(ordered_extent);
 
 	return 0;
+out_unlock:
+	unlock_extent_cached(io_tree, ordered_extent->file_offset,
+			     ordered_extent->file_offset +
+			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
+	goto out;
 }
 
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
@@ -1905,6 +2011,8 @@
 	struct inode *inode;
 };
 
+/* JDM: If this is fs-wide, why can't we add a pointer to
+ * btrfs_inode instead and avoid the allocation? */
 void btrfs_add_delayed_iput(struct inode *inode)
 {
 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
@@ -2051,20 +2159,27 @@
 	/* grab metadata reservation from transaction handle */
 	if (reserve) {
 		ret = btrfs_orphan_reserve_metadata(trans, inode);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
 	}
 
 	/* insert an orphan item to track this unlinked/truncated file */
 	if (insert >= 1) {
 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
-		BUG_ON(ret && ret != -EEXIST);
+		if (ret && ret != -EEXIST) {
+			btrfs_abort_transaction(trans, root, ret);
+			return ret;
+		}
+		ret = 0;
 	}
 
 	/* insert an orphan item to track subvolume contains orphan files */
 	if (insert >= 2) {
 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
 					       root->root_key.objectid);
-		BUG_ON(ret);
+		if (ret && ret != -EEXIST) {
+			btrfs_abort_transaction(trans, root, ret);
+			return ret;
+		}
 	}
 	return 0;
 }
@@ -2094,7 +2209,7 @@
 
 	if (trans && delete_item) {
 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
 	}
 
 	if (release_rsv)
@@ -2228,7 +2343,7 @@
 			}
 			ret = btrfs_del_orphan_item(trans, root,
 						    found_key.objectid);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
 			btrfs_end_transaction(trans, root);
 			continue;
 		}
@@ -2610,16 +2725,22 @@
 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
 		       "inode %llu parent %llu\n", name_len, name,
 		       (unsigned long long)ino, (unsigned long long)dir_ino);
+		btrfs_abort_transaction(trans, root, ret);
 		goto err;
 	}
 
 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
-	if (ret)
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
 		goto err;
+	}
 
 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
 					 inode, dir_ino);
-	BUG_ON(ret != 0 && ret != -ENOENT);
+	if (ret != 0 && ret != -ENOENT) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto err;
+	}
 
 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
 					   dir, index);
@@ -2777,7 +2898,7 @@
 			err = ret;
 			goto out;
 		}
-		BUG_ON(ret == 0);
+		BUG_ON(ret == 0); /* Corruption */
 		if (check_path_shared(root, path))
 			goto out;
 		btrfs_release_path(path);
@@ -2810,7 +2931,7 @@
 		err = PTR_ERR(ref);
 		goto out;
 	}
-	BUG_ON(!ref);
+	BUG_ON(!ref); /* Logic error */
 	if (check_path_shared(root, path))
 		goto out;
 	index = btrfs_inode_ref_index(path->nodes[0], ref);
@@ -2917,23 +3038,42 @@
 
 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
 				   name, name_len, -1);
-	BUG_ON(IS_ERR_OR_NULL(di));
+	if (IS_ERR_OR_NULL(di)) {
+		if (!di)
+			ret = -ENOENT;
+		else
+			ret = PTR_ERR(di);
+		goto out;
+	}
 
 	leaf = path->nodes[0];
 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto out;
+	}
 	btrfs_release_path(path);
 
 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
 				 objectid, root->root_key.objectid,
 				 dir_ino, &index, name, name_len);
 	if (ret < 0) {
-		BUG_ON(ret != -ENOENT);
+		if (ret != -ENOENT) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto out;
+		}
 		di = btrfs_search_dir_index_item(root, path, dir_ino,
 						 name, name_len);
-		BUG_ON(IS_ERR_OR_NULL(di));
+		if (IS_ERR_OR_NULL(di)) {
+			if (!di)
+				ret = -ENOENT;
+			else
+				ret = PTR_ERR(di);
+			btrfs_abort_transaction(trans, root, ret);
+			goto out;
+		}
 
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
@@ -2943,15 +3083,19 @@
 	btrfs_release_path(path);
 
 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto out;
+	}
 
 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, root, dir);
-	BUG_ON(ret);
-
+	if (ret)
+		btrfs_abort_transaction(trans, root, ret);
+out:
 	btrfs_free_path(path);
-	return 0;
+	return ret;
 }
 
 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -3161,8 +3305,8 @@
 				}
 				size =
 				    btrfs_file_extent_calc_inline_size(size);
-				ret = btrfs_truncate_item(trans, root, path,
-							  size, 1);
+				btrfs_truncate_item(trans, root, path,
+						    size, 1);
 			} else if (root->ref_cows) {
 				inode_sub_bytes(inode, item_end + 1 -
 						found_key.offset);
@@ -3210,7 +3354,11 @@
 				ret = btrfs_del_items(trans, root, path,
 						pending_del_slot,
 						pending_del_nr);
-				BUG_ON(ret);
+				if (ret) {
+					btrfs_abort_transaction(trans,
+								root, ret);
+					goto error;
+				}
 				pending_del_nr = 0;
 			}
 			btrfs_release_path(path);
@@ -3223,8 +3371,10 @@
 	if (pending_del_nr) {
 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
 				      pending_del_nr);
-		BUG_ON(ret);
+		if (ret)
+			btrfs_abort_transaction(trans, root, ret);
 	}
+error:
 	btrfs_free_path(path);
 	return err;
 }
@@ -3282,8 +3432,7 @@
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
-			 GFP_NOFS);
+	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
 	set_page_extent_mapped(page);
 
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -3359,7 +3508,7 @@
 		btrfs_wait_ordered_range(inode, hole_start,
 					 block_end - hole_start);
 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
-				 &cached_state, GFP_NOFS);
+				 &cached_state);
 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
 		if (!ordered)
 			break;
@@ -3372,7 +3521,10 @@
 	while (1) {
 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
 				block_end - cur_offset, 0);
-		BUG_ON(IS_ERR_OR_NULL(em));
+		if (IS_ERR(em)) {
+			err = PTR_ERR(em);
+			break;
+		}
 		last_byte = min(extent_map_end(em), block_end);
 		last_byte = (last_byte + mask) & ~mask;
 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3389,7 +3541,7 @@
 						 cur_offset + hole_size,
 						 &hint_byte, 1);
 			if (err) {
-				btrfs_update_inode(trans, root, inode);
+				btrfs_abort_transaction(trans, root, err);
 				btrfs_end_transaction(trans, root);
 				break;
 			}
@@ -3399,7 +3551,7 @@
 					0, hole_size, 0, hole_size,
 					0, 0, 0);
 			if (err) {
-				btrfs_update_inode(trans, root, inode);
+				btrfs_abort_transaction(trans, root, err);
 				btrfs_end_transaction(trans, root);
 				break;
 			}
@@ -3779,7 +3931,7 @@
 	}
 }
 
-int btrfs_invalidate_inodes(struct btrfs_root *root)
+void btrfs_invalidate_inodes(struct btrfs_root *root)
 {
 	struct rb_node *node;
 	struct rb_node *prev;
@@ -3839,7 +3991,6 @@
 		node = rb_next(node);
 	}
 	spin_unlock(&root->inode_lock);
-	return 0;
 }
 
 static int btrfs_init_locked_inode(struct inode *inode, void *p)
@@ -4581,18 +4732,26 @@
 					     parent_ino, index);
 	}
 
-	if (ret == 0) {
-		ret = btrfs_insert_dir_item(trans, root, name, name_len,
-					    parent_inode, &key,
-					    btrfs_inode_type(inode), index);
-		if (ret)
-			goto fail_dir_item;
+	/* Nothing to clean up yet */
+	if (ret)
+		return ret;
 
-		btrfs_i_size_write(parent_inode, parent_inode->i_size +
-				   name_len * 2);
-		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
-		ret = btrfs_update_inode(trans, root, parent_inode);
+	ret = btrfs_insert_dir_item(trans, root, name, name_len,
+				    parent_inode, &key,
+				    btrfs_inode_type(inode), index);
+	if (ret == -EEXIST)
+		goto fail_dir_item;
+	else if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
 	}
+
+	btrfs_i_size_write(parent_inode, parent_inode->i_size +
+			   name_len * 2);
+	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
+	ret = btrfs_update_inode(trans, root, parent_inode);
+	if (ret)
+		btrfs_abort_transaction(trans, root, ret);
 	return ret;
 
 fail_dir_item:
@@ -4806,7 +4965,8 @@
 	} else {
 		struct dentry *parent = dentry->d_parent;
 		err = btrfs_update_inode(trans, root, inode);
-		BUG_ON(err);
+		if (err)
+			goto fail;
 		d_instantiate(dentry, inode);
 		btrfs_log_new_name(trans, inode, NULL, parent);
 	}
@@ -5137,7 +5297,7 @@
 				ret = uncompress_inline(path, inode, page,
 							pg_offset,
 							extent_offset, item);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			} else {
 				map = kmap(page);
 				read_extent_buffer(leaf, map + pg_offset, ptr,
@@ -5252,6 +5412,7 @@
 		free_extent_map(em);
 		return ERR_PTR(err);
 	}
+	BUG_ON(!em); /* Error is always set */
 	return em;
 }
 
@@ -5414,7 +5575,7 @@
 
 	alloc_hint = get_extent_allocation_hint(inode, start, len);
 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
-				   alloc_hint, (u64)-1, &ins, 1);
+				   alloc_hint, &ins, 1);
 	if (ret) {
 		em = ERR_PTR(ret);
 		goto out;
@@ -5602,7 +5763,7 @@
 		free_extent_map(em);
 		/* DIO will do one hole at a time, so just unlock a sector */
 		unlock_extent(&BTRFS_I(inode)->io_tree, start,
-			      start + root->sectorsize - 1, GFP_NOFS);
+			      start + root->sectorsize - 1);
 		return 0;
 	}
 
@@ -5743,7 +5904,7 @@
 	} while (bvec <= bvec_end);
 
 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
-		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
+		      dip->logical_offset + dip->bytes - 1);
 	bio->bi_private = dip->private;
 
 	kfree(dip->csums);
@@ -5794,7 +5955,7 @@
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
 			 ordered->file_offset + ordered->len - 1, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 
 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
 		ret = btrfs_mark_extent_written(trans, inode,
@@ -5868,7 +6029,7 @@
 	int ret;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 	return 0;
 }
 
@@ -6209,7 +6370,7 @@
 
 	while (1) {
 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, &cached_state, GFP_NOFS);
+				 0, &cached_state);
 		/*
 		 * We're concerned with the entire range that we're going to be
 		 * doing DIO to, so we need to make sure theres no ordered
@@ -6233,7 +6394,7 @@
 	if (writing) {
 		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				     EXTENT_DELALLOC, 0, NULL, &cached_state,
+				     EXTENT_DELALLOC, NULL, &cached_state,
 				     GFP_NOFS);
 		if (ret) {
 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
@@ -6363,8 +6524,7 @@
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
-	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
-			 GFP_NOFS);
+	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
 					   page_offset(page));
 	if (ordered) {
@@ -6386,8 +6546,7 @@
 		}
 		btrfs_put_ordered_extent(ordered);
 		cached_state = NULL;
-		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
-				 GFP_NOFS);
+		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
 	}
 	clear_extent_bit(tree, page_start, page_end,
 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -6462,8 +6621,7 @@
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
-			 GFP_NOFS);
+	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
 	set_page_extent_mapped(page);
 
 	/*
@@ -6737,10 +6895,9 @@
 	btrfs_i_size_write(inode, 0);
 
 	err = btrfs_update_inode(trans, new_root, inode);
-	BUG_ON(err);
 
 	iput(inode);
-	return 0;
+	return err;
 }
 
 struct inode *btrfs_alloc_inode(struct super_block *sb)
@@ -6783,6 +6940,8 @@
 	extent_map_tree_init(&ei->extent_tree);
 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
+	ei->io_tree.track_uptodate = 1;
+	ei->io_failure_tree.track_uptodate = 1;
 	mutex_init(&ei->log_mutex);
 	mutex_init(&ei->delalloc_mutex);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
@@ -7072,7 +7231,10 @@
 		if (!ret)
 			ret = btrfs_update_inode(trans, root, old_inode);
 	}
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto out_fail;
+	}
 
 	if (new_inode) {
 		new_inode->i_ctime = CURRENT_TIME;
@@ -7090,11 +7252,14 @@
 						 new_dentry->d_name.name,
 						 new_dentry->d_name.len);
 		}
-		BUG_ON(ret);
-		if (new_inode->i_nlink == 0) {
+		if (!ret && new_inode->i_nlink == 0) {
 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
 			BUG_ON(ret);
 		}
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
+			goto out_fail;
+		}
 	}
 
 	fixup_inode_flags(new_dir, old_inode);
@@ -7102,7 +7267,10 @@
 	ret = btrfs_add_link(trans, new_dir, old_inode,
 			     new_dentry->d_name.name,
 			     new_dentry->d_name.len, 0, index);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto out_fail;
+	}
 
 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
 		struct dentry *parent = new_dentry->d_parent;
@@ -7315,7 +7483,7 @@
 		}
 
 		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
-					   0, *alloc_hint, (u64)-1, &ins, 1);
+					   0, *alloc_hint, &ins, 1);
 		if (ret) {
 			if (own_trans)
 				btrfs_end_transaction(trans, root);
@@ -7327,7 +7495,12 @@
 						  ins.offset, ins.offset,
 						  ins.offset, 0, 0, 0,
 						  BTRFS_FILE_EXTENT_PREALLOC);
-		BUG_ON(ret);
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
+			if (own_trans)
+				btrfs_end_transaction(trans, root);
+			break;
+		}
 		btrfs_drop_extent_cache(inode, cur_offset,
 					cur_offset + ins.offset -1, 0);
 
@@ -7349,7 +7522,13 @@
 		}
 
 		ret = btrfs_update_inode(trans, root, inode);
-		BUG_ON(ret);
+
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
+			if (own_trans)
+				btrfs_end_transaction(trans, root);
+			break;
+		}
 
 		if (own_trans)
 			btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d8b5471..18cc23d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -425,22 +425,37 @@
 
 	key.offset = (u64)-1;
 	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
-	BUG_ON(IS_ERR(new_root));
+	if (IS_ERR(new_root)) {
+		btrfs_abort_transaction(trans, root, PTR_ERR(new_root));
+		ret = PTR_ERR(new_root);
+		goto fail;
+	}
 
 	btrfs_record_root_in_trans(trans, new_root);
 
 	ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
+	if (ret) {
+		/* We potentially lose an unused inode item here */
+		btrfs_abort_transaction(trans, root, ret);
+		goto fail;
+	}
+
 	/*
 	 * insert the directory item
 	 */
 	ret = btrfs_set_inode_index(dir, &index);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto fail;
+	}
 
 	ret = btrfs_insert_dir_item(trans, root,
 				    name, namelen, dir, &key,
 				    BTRFS_FT_DIR, index);
-	if (ret)
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
 		goto fail;
+	}
 
 	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
 	ret = btrfs_update_inode(trans, root, dir);
@@ -769,6 +784,31 @@
 	return -ENOENT;
 }
 
+/*
+ * Validity check of prev em and next em:
+ * 1) no prev/next em
+ * 2) prev/next em is a hole/inline extent
+ */
+static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
+{
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map *prev = NULL, *next = NULL;
+	int ret = 0;
+
+	read_lock(&em_tree->lock);
+	prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
+	next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
+	read_unlock(&em_tree->lock);
+
+	if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
+	    (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
+		ret = 1;
+	free_extent_map(prev);
+	free_extent_map(next);
+
+	return ret;
+}
+
 static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 			       int thresh, u64 *last_len, u64 *skip,
 			       u64 *defrag_end)
@@ -797,17 +837,25 @@
 
 	if (!em) {
 		/* get the big lock and read metadata off disk */
-		lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+		lock_extent(io_tree, start, start + len - 1);
 		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+		unlock_extent(io_tree, start, start + len - 1);
 
 		if (IS_ERR(em))
 			return 0;
 	}
 
 	/* this will cover holes, and inline extents */
-	if (em->block_start >= EXTENT_MAP_LAST_BYTE)
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
 		ret = 0;
+		goto out;
+	}
+
+	/* If we have nothing to merge with us, just skip. */
+	if (check_adjacent_extents(inode, em)) {
+		ret = 0;
+		goto out;
+	}
 
 	/*
 	 * we hit a real extent, if it is big don't bother defragging it again
@@ -815,6 +863,7 @@
 	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
 		ret = 0;
 
+out:
 	/*
 	 * last_len ends up being a counter of how many bytes we've defragged.
 	 * every time we choose not to defrag an extent, we reset *last_len
@@ -856,6 +905,7 @@
 	u64 isize = i_size_read(inode);
 	u64 page_start;
 	u64 page_end;
+	u64 page_cnt;
 	int ret;
 	int i;
 	int i_done;
@@ -864,19 +914,21 @@
 	struct extent_io_tree *tree;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 
-	if (isize == 0)
-		return 0;
 	file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+	if (!isize || start_index > file_end)
+		return 0;
+
+	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
 
 	ret = btrfs_delalloc_reserve_space(inode,
-					   num_pages << PAGE_CACHE_SHIFT);
+					   page_cnt << PAGE_CACHE_SHIFT);
 	if (ret)
 		return ret;
 	i_done = 0;
 	tree = &BTRFS_I(inode)->io_tree;
 
 	/* step one, lock all the pages */
-	for (i = 0; i < num_pages; i++) {
+	for (i = 0; i < page_cnt; i++) {
 		struct page *page;
 again:
 		page = find_or_create_page(inode->i_mapping,
@@ -887,10 +939,10 @@
 		page_start = page_offset(page);
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
 		while (1) {
-			lock_extent(tree, page_start, page_end, GFP_NOFS);
+			lock_extent(tree, page_start, page_end);
 			ordered = btrfs_lookup_ordered_extent(inode,
 							      page_start);
-			unlock_extent(tree, page_start, page_end, GFP_NOFS);
+			unlock_extent(tree, page_start, page_end);
 			if (!ordered)
 				break;
 
@@ -898,6 +950,15 @@
 			btrfs_start_ordered_extent(inode, ordered, 1);
 			btrfs_put_ordered_extent(ordered);
 			lock_page(page);
+			/*
+			 * we unlocked the page above, so we need to check if
+			 * it was released or not.
+			 */
+			if (page->mapping != inode->i_mapping) {
+				unlock_page(page);
+				page_cache_release(page);
+				goto again;
+			}
 		}
 
 		if (!PageUptodate(page)) {
@@ -911,15 +972,6 @@
 			}
 		}
 
-		isize = i_size_read(inode);
-		file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
-		if (!isize || page->index > file_end) {
-			/* whoops, we blew past eof, skip this page */
-			unlock_page(page);
-			page_cache_release(page);
-			break;
-		}
-
 		if (page->mapping != inode->i_mapping) {
 			unlock_page(page);
 			page_cache_release(page);
@@ -946,19 +998,18 @@
 	page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree,
-			 page_start, page_end - 1, 0, &cached_state,
-			 GFP_NOFS);
+			 page_start, page_end - 1, 0, &cached_state);
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
 			  page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
 			  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
 			  GFP_NOFS);
 
-	if (i_done != num_pages) {
+	if (i_done != page_cnt) {
 		spin_lock(&BTRFS_I(inode)->lock);
 		BTRFS_I(inode)->outstanding_extents++;
 		spin_unlock(&BTRFS_I(inode)->lock);
 		btrfs_delalloc_release_space(inode,
-				     (num_pages - i_done) << PAGE_CACHE_SHIFT);
+				     (page_cnt - i_done) << PAGE_CACHE_SHIFT);
 	}
 
 
@@ -983,7 +1034,7 @@
 		unlock_page(pages[i]);
 		page_cache_release(pages[i]);
 	}
-	btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT);
+	btrfs_delalloc_release_space(inode, page_cnt << PAGE_CACHE_SHIFT);
 	return ret;
 
 }
@@ -1089,12 +1140,9 @@
 		if (!(inode->i_sb->s_flags & MS_ACTIVE))
 			break;
 
-		if (!newer_than &&
-		    !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
-					PAGE_CACHE_SIZE,
-					extent_thresh,
-					&last_len, &skip,
-					&defrag_end)) {
+		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+					 PAGE_CACHE_SIZE, extent_thresh,
+					 &last_len, &skip, &defrag_end)) {
 			unsigned long next;
 			/*
 			 * the should_defrag function tells us how much to skip
@@ -1123,17 +1171,24 @@
 			ra_index += max_cluster;
 		}
 
+		mutex_lock(&inode->i_mutex);
 		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
-		if (ret < 0)
+		if (ret < 0) {
+			mutex_unlock(&inode->i_mutex);
 			goto out_ra;
+		}
 
 		defrag_count += ret;
 		balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
+		mutex_unlock(&inode->i_mutex);
 
 		if (newer_than) {
 			if (newer_off == (u64)-1)
 				break;
 
+			if (ret > 0)
+				i += ret;
+
 			newer_off = max(newer_off + 1,
 					(u64)i << PAGE_CACHE_SHIFT);
 
@@ -1966,7 +2021,11 @@
 				dest->root_key.objectid,
 				dentry->d_name.name,
 				dentry->d_name.len);
-	BUG_ON(ret);
+	if (ret) {
+		err = ret;
+		btrfs_abort_transaction(trans, root, ret);
+		goto out_end_trans;
+	}
 
 	btrfs_record_root_in_trans(trans, dest);
 
@@ -1979,11 +2038,16 @@
 		ret = btrfs_insert_orphan_item(trans,
 					root->fs_info->tree_root,
 					dest->root_key.objectid);
-		BUG_ON(ret);
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
+			err = ret;
+			goto out_end_trans;
+		}
 	}
-
+out_end_trans:
 	ret = btrfs_end_transaction(trans, root);
-	BUG_ON(ret);
+	if (ret && !err)
+		err = ret;
 	inode->i_flags |= S_DEAD;
 out_up_write:
 	up_write(&root->fs_info->subvol_sem);
@@ -2326,13 +2390,13 @@
 	   another, and lock file content */
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
-		lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+		lock_extent(&BTRFS_I(src)->io_tree, off, off+len);
 		ordered = btrfs_lookup_first_ordered_extent(src, off+len);
 		if (!ordered &&
 		    !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
 				   EXTENT_DELALLOC, 0, NULL))
 			break;
-		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
 		btrfs_wait_ordered_range(src, off, len);
@@ -2447,11 +2511,21 @@
 							 new_key.offset,
 							 new_key.offset + datal,
 							 &hint_byte, 1);
-				BUG_ON(ret);
+				if (ret) {
+					btrfs_abort_transaction(trans, root,
+								ret);
+					btrfs_end_transaction(trans, root);
+					goto out;
+				}
 
 				ret = btrfs_insert_empty_item(trans, root, path,
 							      &new_key, size);
-				BUG_ON(ret);
+				if (ret) {
+					btrfs_abort_transaction(trans, root,
+								ret);
+					btrfs_end_transaction(trans, root);
+					goto out;
+				}
 
 				leaf = path->nodes[0];
 				slot = path->slots[0];
@@ -2478,7 +2552,15 @@
 							btrfs_ino(inode),
 							new_key.offset - datao,
 							0);
-					BUG_ON(ret);
+					if (ret) {
+						btrfs_abort_transaction(trans,
+									root,
+									ret);
+						btrfs_end_transaction(trans,
+								      root);
+						goto out;
+
+					}
 				}
 			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
 				u64 skip = 0;
@@ -2503,11 +2585,21 @@
 							 new_key.offset,
 							 new_key.offset + datal,
 							 &hint_byte, 1);
-				BUG_ON(ret);
+				if (ret) {
+					btrfs_abort_transaction(trans, root,
+								ret);
+					btrfs_end_transaction(trans, root);
+					goto out;
+				}
 
 				ret = btrfs_insert_empty_item(trans, root, path,
 							      &new_key, size);
-				BUG_ON(ret);
+				if (ret) {
+					btrfs_abort_transaction(trans, root,
+								ret);
+					btrfs_end_transaction(trans, root);
+					goto out;
+				}
 
 				if (skip) {
 					u32 start =
@@ -2541,8 +2633,12 @@
 				btrfs_i_size_write(inode, endoff);
 
 			ret = btrfs_update_inode(trans, root, inode);
-			BUG_ON(ret);
-			btrfs_end_transaction(trans, root);
+			if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+				btrfs_end_transaction(trans, root);
+				goto out;
+			}
+			ret = btrfs_end_transaction(trans, root);
 		}
 next:
 		btrfs_release_path(path);
@@ -2551,7 +2647,7 @@
 	ret = 0;
 out:
 	btrfs_release_path(path);
-	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
 out_unlock:
 	mutex_unlock(&src->i_mutex);
 	mutex_unlock(&inode->i_mutex);
@@ -3066,8 +3162,8 @@
 		goto out;
 
 	extent_item_pos = loi->logical - key.objectid;
-	ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
-					extent_item_pos, build_ino_list,
+	ret = iterate_extent_inodes(root->fs_info, key.objectid,
+					extent_item_pos, 0, build_ino_list,
 					inodes);
 
 	if (ret < 0)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5e178d8..272f911 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -208,7 +208,7 @@
  * take a spinning write lock.  This will wait for both
  * blocking readers or writers
  */
-int btrfs_tree_lock(struct extent_buffer *eb)
+void btrfs_tree_lock(struct extent_buffer *eb)
 {
 again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
@@ -230,13 +230,12 @@
 	atomic_inc(&eb->spinning_writers);
 	atomic_inc(&eb->write_locks);
 	eb->lock_owner = current->pid;
-	return 0;
 }
 
 /*
  * drop a spinning or a blocking write lock.
  */
-int btrfs_tree_unlock(struct extent_buffer *eb)
+void btrfs_tree_unlock(struct extent_buffer *eb)
 {
 	int blockers = atomic_read(&eb->blocking_writers);
 
@@ -255,7 +254,6 @@
 		atomic_dec(&eb->spinning_writers);
 		write_unlock(&eb->lock);
 	}
-	return 0;
 }
 
 void btrfs_assert_tree_locked(struct extent_buffer *eb)
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 17247dd..ca52681 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -24,8 +24,8 @@
 #define BTRFS_WRITE_LOCK_BLOCKING 3
 #define BTRFS_READ_LOCK_BLOCKING 4
 
-int btrfs_tree_lock(struct extent_buffer *eb);
-int btrfs_tree_unlock(struct extent_buffer *eb);
+void btrfs_tree_lock(struct extent_buffer *eb);
+void btrfs_tree_unlock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
 void btrfs_tree_read_lock(struct extent_buffer *eb);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a1c9404..bbf6d0d 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -59,6 +59,14 @@
 	return NULL;
 }
 
+static void ordered_data_tree_panic(struct inode *inode, int errno,
+					       u64 offset)
+{
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
+		    "%llu\n", (unsigned long long)offset);
+}
+
 /*
  * look for a given offset in the tree, and if it can't be found return the
  * first lesser offset
@@ -207,7 +215,8 @@
 	spin_lock(&tree->lock);
 	node = tree_insert(&tree->tree, file_offset,
 			   &entry->rb_node);
-	BUG_ON(node);
+	if (node)
+		ordered_data_tree_panic(inode, -EEXIST, file_offset);
 	spin_unlock(&tree->lock);
 
 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
@@ -215,7 +224,6 @@
 		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
 	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 
-	BUG_ON(node);
 	return 0;
 }
 
@@ -249,9 +257,9 @@
  * when an ordered extent is finished.  If the list covers more than one
  * ordered extent, it is split across multiples.
  */
-int btrfs_add_ordered_sum(struct inode *inode,
-			  struct btrfs_ordered_extent *entry,
-			  struct btrfs_ordered_sum *sum)
+void btrfs_add_ordered_sum(struct inode *inode,
+			   struct btrfs_ordered_extent *entry,
+			   struct btrfs_ordered_sum *sum)
 {
 	struct btrfs_ordered_inode_tree *tree;
 
@@ -259,7 +267,6 @@
 	spin_lock(&tree->lock);
 	list_add_tail(&sum->list, &entry->list);
 	spin_unlock(&tree->lock);
-	return 0;
 }
 
 /*
@@ -384,7 +391,7 @@
  * used to drop a reference on an ordered extent.  This will free
  * the extent if the last reference is dropped
  */
-int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
+void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 {
 	struct list_head *cur;
 	struct btrfs_ordered_sum *sum;
@@ -400,7 +407,6 @@
 		}
 		kfree(entry);
 	}
-	return 0;
 }
 
 /*
@@ -408,8 +414,8 @@
  * and you must wake_up entry->wait.  You must hold the tree lock
  * while you call this function.
  */
-static int __btrfs_remove_ordered_extent(struct inode *inode,
-				struct btrfs_ordered_extent *entry)
+static void __btrfs_remove_ordered_extent(struct inode *inode,
+					  struct btrfs_ordered_extent *entry)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -436,35 +442,30 @@
 		list_del_init(&BTRFS_I(inode)->ordered_operations);
 	}
 	spin_unlock(&root->fs_info->ordered_extent_lock);
-
-	return 0;
 }
 
 /*
  * remove an ordered extent from the tree.  No references are dropped
  * but any waiters are woken.
  */
-int btrfs_remove_ordered_extent(struct inode *inode,
-				struct btrfs_ordered_extent *entry)
+void btrfs_remove_ordered_extent(struct inode *inode,
+				 struct btrfs_ordered_extent *entry)
 {
 	struct btrfs_ordered_inode_tree *tree;
-	int ret;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
 	spin_lock(&tree->lock);
-	ret = __btrfs_remove_ordered_extent(inode, entry);
+	__btrfs_remove_ordered_extent(inode, entry);
 	spin_unlock(&tree->lock);
 	wake_up(&entry->wait);
-
-	return ret;
 }
 
 /*
  * wait for all the ordered extents in a root.  This is done when balancing
  * space between drives.
  */
-int btrfs_wait_ordered_extents(struct btrfs_root *root,
-			       int nocow_only, int delay_iput)
+void btrfs_wait_ordered_extents(struct btrfs_root *root,
+				int nocow_only, int delay_iput)
 {
 	struct list_head splice;
 	struct list_head *cur;
@@ -512,7 +513,6 @@
 		spin_lock(&root->fs_info->ordered_extent_lock);
 	}
 	spin_unlock(&root->fs_info->ordered_extent_lock);
-	return 0;
 }
 
 /*
@@ -525,7 +525,7 @@
  * extra check to make sure the ordered operation list really is empty
  * before we return
  */
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
+void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
 {
 	struct btrfs_inode *btrfs_inode;
 	struct inode *inode;
@@ -573,8 +573,6 @@
 
 	spin_unlock(&root->fs_info->ordered_extent_lock);
 	mutex_unlock(&root->fs_info->ordered_operations_mutex);
-
-	return 0;
 }
 
 /*
@@ -609,7 +607,7 @@
 /*
  * Used to wait on ordered extents across a large range of bytes.
  */
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 {
 	u64 end;
 	u64 orig_end;
@@ -664,7 +662,6 @@
 		schedule_timeout(1);
 		goto again;
 	}
-	return 0;
 }
 
 /*
@@ -948,9 +945,8 @@
  * If trans is not null, we'll do a friendly check for a transaction that
  * is already flushing things and force the IO down ourselves.
  */
-int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
-				struct inode *inode)
+void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root, struct inode *inode)
 {
 	u64 last_mod;
 
@@ -961,7 +957,7 @@
 	 * commit, we can safely return without doing anything
 	 */
 	if (last_mod < root->fs_info->last_trans_committed)
-		return 0;
+		return;
 
 	/*
 	 * the transaction is already committing.  Just start the IO and
@@ -969,7 +965,7 @@
 	 */
 	if (trans && root->fs_info->running_transaction->blocked) {
 		btrfs_wait_ordered_range(inode, 0, (u64)-1);
-		return 0;
+		return;
 	}
 
 	spin_lock(&root->fs_info->ordered_extent_lock);
@@ -978,6 +974,4 @@
 			      &root->fs_info->ordered_operations);
 	}
 	spin_unlock(&root->fs_info->ordered_extent_lock);
-
-	return 0;
 }
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index ff1f69a..c355ad4 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -138,8 +138,8 @@
 	t->last = NULL;
 }
 
-int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
-int btrfs_remove_ordered_extent(struct inode *inode,
+void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
+void btrfs_remove_ordered_extent(struct inode *inode,
 				struct btrfs_ordered_extent *entry);
 int btrfs_dec_test_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
@@ -154,14 +154,14 @@
 int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
 				      u64 start, u64 len, u64 disk_len,
 				      int type, int compress_type);
-int btrfs_add_ordered_sum(struct inode *inode,
-			  struct btrfs_ordered_extent *entry,
-			  struct btrfs_ordered_sum *sum);
+void btrfs_add_ordered_sum(struct inode *inode,
+			   struct btrfs_ordered_extent *entry,
+			   struct btrfs_ordered_sum *sum);
 struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 							 u64 file_offset);
 void btrfs_start_ordered_extent(struct inode *inode,
 				struct btrfs_ordered_extent *entry, int wait);
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
 struct btrfs_ordered_extent *
 btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
@@ -170,10 +170,10 @@
 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 				struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
-int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
-				struct inode *inode);
-int btrfs_wait_ordered_extents(struct btrfs_root *root,
-			       int nocow_only, int delay_iput);
+void btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
+void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct inode *inode);
+void btrfs_wait_ordered_extents(struct btrfs_root *root,
+				int nocow_only, int delay_iput);
 #endif
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index f8be250..24cad16 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -58,7 +58,7 @@
 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 	if (ret < 0)
 		goto out;
-	if (ret) {
+	if (ret) { /* JDM: Really? */
 		ret = -ENOENT;
 		goto out;
 	}
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 22db045..dc5d331 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -54,7 +54,6 @@
  * than the 2 started one after another.
  */
 
-#define MAX_MIRRORS 2
 #define MAX_IN_FLIGHT 6
 
 struct reada_extctl {
@@ -71,7 +70,7 @@
 	struct list_head	extctl;
 	struct kref		refcnt;
 	spinlock_t		lock;
-	struct reada_zone	*zones[MAX_MIRRORS];
+	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
 	int			nzones;
 	struct btrfs_device	*scheduled_for;
 };
@@ -84,7 +83,8 @@
 	spinlock_t		lock;
 	int			locked;
 	struct btrfs_device	*device;
-	struct btrfs_device	*devs[MAX_MIRRORS]; /* full list, incl self */
+	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
+							   * self */
 	int			ndevs;
 	struct kref		refcnt;
 };
@@ -365,9 +365,9 @@
 	if (ret || !bbio || length < blocksize)
 		goto error;
 
-	if (bbio->num_stripes > MAX_MIRRORS) {
+	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
 		printk(KERN_ERR "btrfs readahead: more than %d copies not "
-				"supported", MAX_MIRRORS);
+				"supported", BTRFS_MAX_MIRRORS);
 		goto error;
 	}
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8c1aae2..017281d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -326,6 +326,19 @@
 	return NULL;
 }
 
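+/*
+ * A duplicate bytenr in the backref cache rb-tree means the cache is
+ * corrupted; report the inconsistency via btrfs_panic().
+ */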
+void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
+{
+	struct btrfs_fs_info *fs_info = NULL;
+	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
+					      rb_node);
+	if (bnode->root)
+		fs_info = bnode->root->fs_info;
+	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
+		    "found at offset %llu\n", (unsigned long long)bytenr);
+}
+
 /*
  * walk up backref nodes until reach node presents tree root
  */
@@ -452,7 +465,8 @@
 	rb_erase(&node->rb_node, &cache->rb_root);
 	node->bytenr = bytenr;
 	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
-	BUG_ON(rb_node);
+	if (rb_node)
+		backref_tree_panic(rb_node, -EEXIST, bytenr);
 }
 
 /*
@@ -999,7 +1013,8 @@
 	if (!cowonly) {
 		rb_node = tree_insert(&cache->rb_root, node->bytenr,
 				      &node->rb_node);
-		BUG_ON(rb_node);
+		if (rb_node)
+			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
 		list_add_tail(&node->lower, &cache->leaves);
 	}
 
@@ -1034,7 +1049,9 @@
 		if (!cowonly) {
 			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
 					      &upper->rb_node);
-			BUG_ON(rb_node);
+			if (rb_node)
+				backref_tree_panic(rb_node, -EEXIST,
+						   upper->bytenr);
 		}
 
 		list_add_tail(&edge->list[UPPER], &upper->lower);
@@ -1180,7 +1197,8 @@
 
 	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
 			      &new_node->rb_node);
-	BUG_ON(rb_node);
+	if (rb_node)
+		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
 
 	if (!new_node->lowest) {
 		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
@@ -1203,14 +1221,15 @@
 /*
  * helper to add 'address of tree root -> reloc tree' mapping
  */
-static int __add_reloc_root(struct btrfs_root *root)
+static int __must_check __add_reloc_root(struct btrfs_root *root)
 {
 	struct rb_node *rb_node;
 	struct mapping_node *node;
 	struct reloc_control *rc = root->fs_info->reloc_ctl;
 
 	node = kmalloc(sizeof(*node), GFP_NOFS);
-	BUG_ON(!node);
+	if (!node)
+		return -ENOMEM;
 
 	node->bytenr = root->node->start;
 	node->data = root;
@@ -1219,7 +1238,12 @@
 	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
 			      node->bytenr, &node->rb_node);
 	spin_unlock(&rc->reloc_root_tree.lock);
-	BUG_ON(rb_node);
+	if (rb_node) {
+		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
+			    "for start=%llu while inserting into relocation "
+			    "tree\n", (unsigned long long)node->bytenr);
+		kfree(node);
+	}
 
 	list_add_tail(&root->root_list, &rc->reloc_roots);
 	return 0;
@@ -1252,7 +1276,8 @@
 		rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
 				      node->bytenr, &node->rb_node);
 		spin_unlock(&rc->reloc_root_tree.lock);
-		BUG_ON(rb_node);
+		if (rb_node)
+			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
 	} else {
 		list_del_init(&root->root_list);
 		kfree(node);
@@ -1334,6 +1359,7 @@
 	struct btrfs_root *reloc_root;
 	struct reloc_control *rc = root->fs_info->reloc_ctl;
 	int clear_rsv = 0;
+	int ret;
 
 	if (root->reloc_root) {
 		reloc_root = root->reloc_root;
@@ -1353,7 +1379,8 @@
 	if (clear_rsv)
 		trans->block_rsv = NULL;
 
-	__add_reloc_root(reloc_root);
+	ret = __add_reloc_root(reloc_root);
+	BUG_ON(ret < 0);
 	root->reloc_root = reloc_root;
 	return 0;
 }
@@ -1577,15 +1604,14 @@
 				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
 				end--;
 				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
-						      key.offset, end,
-						      GFP_NOFS);
+						      key.offset, end);
 				if (!ret)
 					continue;
 
 				btrfs_drop_extent_cache(inode, key.offset, end,
 							1);
 				unlock_extent(&BTRFS_I(inode)->io_tree,
-					      key.offset, end, GFP_NOFS);
+					      key.offset, end);
 			}
 		}
 
@@ -1956,9 +1982,9 @@
 		}
 
 		/* the lock_extent waits for readpage to complete */
-		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		btrfs_drop_extent_cache(inode, start, end, 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
 	}
 	return 0;
 }
@@ -2246,7 +2272,8 @@
 		} else {
 			list_del_init(&reloc_root->root_list);
 		}
-		btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
+		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
+		BUG_ON(ret < 0);
 	}
 
 	if (found) {
@@ -2862,12 +2889,12 @@
 		else
 			end = cluster->end - offset;
 
-		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		num_bytes = end + 1 - start;
 		ret = btrfs_prealloc_file_range(inode, 0, start,
 						num_bytes, num_bytes,
 						end + 1, &alloc_hint);
-		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		if (ret)
 			break;
 		nr++;
@@ -2899,7 +2926,7 @@
 	em->bdev = root->fs_info->fs_devices->latest_bdev;
 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 	while (1) {
 		write_lock(&em_tree->lock);
 		ret = add_extent_mapping(em_tree, em);
@@ -2910,7 +2937,7 @@
 		}
 		btrfs_drop_extent_cache(inode, start, end, 0);
 	}
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
 	return ret;
 }
 
@@ -2990,8 +3017,7 @@
 		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-		lock_extent(&BTRFS_I(inode)->io_tree,
-			    page_start, page_end, GFP_NOFS);
+		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
 
 		set_page_extent_mapped(page);
 
@@ -3007,7 +3033,7 @@
 		set_page_dirty(page);
 
 		unlock_extent(&BTRFS_I(inode)->io_tree,
-			      page_start, page_end, GFP_NOFS);
+			      page_start, page_end);
 		unlock_page(page);
 		page_cache_release(page);
 
@@ -3154,7 +3180,8 @@
 	block->key_ready = 0;
 
 	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
-	BUG_ON(rb_node);
+	if (rb_node)
+		backref_tree_panic(rb_node, -EEXIST, block->bytenr);
 
 	return 0;
 }
@@ -3426,7 +3453,9 @@
 			block->key_ready = 1;
 			rb_node = tree_insert(blocks, block->bytenr,
 					      &block->rb_node);
-			BUG_ON(rb_node);
+			if (rb_node)
+				backref_tree_panic(rb_node, -EEXIST,
+						   block->bytenr);
 		}
 		if (counted)
 			added = 1;
@@ -4073,10 +4102,11 @@
 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
 {
 	struct btrfs_trans_handle *trans;
-	int ret;
+	int ret, err;
 
 	trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 
 	memset(&root->root_item.drop_progress, 0,
 		sizeof(root->root_item.drop_progress));
@@ -4084,11 +4114,11 @@
 	btrfs_set_root_refs(&root->root_item, 0);
 	ret = btrfs_update_root(trans, root->fs_info->tree_root,
 				&root->root_key, &root->root_item);
-	BUG_ON(ret);
 
-	ret = btrfs_end_transaction(trans, root->fs_info->tree_root);
-	BUG_ON(ret);
-	return 0;
+	err = btrfs_end_transaction(trans, root->fs_info->tree_root);
+	if (err)
+		return err;
+	return ret;
 }
 
 /*
@@ -4156,7 +4186,11 @@
 					err = ret;
 					goto out;
 				}
-				mark_garbage_root(reloc_root);
+				ret = mark_garbage_root(reloc_root);
+				if (ret < 0) {
+					err = ret;
+					goto out;
+				}
 			}
 		}
 
@@ -4202,13 +4236,19 @@
 
 		fs_root = read_fs_root(root->fs_info,
 				       reloc_root->root_key.offset);
-		BUG_ON(IS_ERR(fs_root));
+		if (IS_ERR(fs_root)) {
+			err = PTR_ERR(fs_root);
+			goto out_free;
+		}
 
-		__add_reloc_root(reloc_root);
+		err = __add_reloc_root(reloc_root);
+		BUG_ON(err < 0); /* -ENOMEM or logic error */
 		fs_root->reloc_root = reloc_root;
 	}
 
-	btrfs_commit_transaction(trans, rc->extent_root);
+	err = btrfs_commit_transaction(trans, rc->extent_root);
+	if (err)
+		goto out_free;
 
 	merge_reloc_roots(rc);
 
@@ -4218,7 +4258,7 @@
 	if (IS_ERR(trans))
 		err = PTR_ERR(trans);
 	else
-		btrfs_commit_transaction(trans, rc->extent_root);
+		err = btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
 	kfree(rc);
 out:
@@ -4267,6 +4307,8 @@
 	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
 				       disk_bytenr + len - 1, &list, 0);
+	if (ret)
+		goto out;
 
 	while (!list_empty(&list)) {
 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
@@ -4284,6 +4326,7 @@
 
 		btrfs_add_ordered_sum(inode, ordered, sums);
 	}
+out:
 	btrfs_put_ordered_extent(ordered);
 	return ret;
 }
@@ -4380,7 +4423,7 @@
  * called after snapshot is created. migrate block reservation
  * and create reloc root for the newly created snapshot
  */
-void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
+int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
 			       struct btrfs_pending_snapshot *pending)
 {
 	struct btrfs_root *root = pending->root;
@@ -4390,7 +4433,7 @@
 	int ret;
 
 	if (!root->reloc_root)
-		return;
+		return 0;
 
 	rc = root->fs_info->reloc_ctl;
 	rc->merging_rsv_size += rc->nodes_relocated;
@@ -4399,18 +4442,21 @@
 		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
 					      rc->block_rsv,
 					      rc->nodes_relocated);
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 	}
 
 	new_root = pending->snap;
 	reloc_root = create_reloc_root(trans, root->reloc_root,
 				       new_root->root_key.objectid);
+	if (IS_ERR(reloc_root))
+		return PTR_ERR(reloc_root);
 
-	__add_reloc_root(reloc_root);
+	ret = __add_reloc_root(reloc_root);
+	BUG_ON(ret < 0);
 	new_root->reloc_root = reloc_root;
 
-	if (rc->create_reloc_tree) {
+	if (rc->create_reloc_tree)
 		ret = clone_backref_node(trans, rc, root, reloc_root);
-		BUG_ON(ret);
-	}
+	return ret;
 }
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f409990..24fb8ce 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -93,10 +93,14 @@
 	unsigned long ptr;
 
 	path = btrfs_alloc_path();
-	BUG_ON(!path);
+	if (!path)
+		return -ENOMEM;
+
 	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
-	if (ret < 0)
+	if (ret < 0) {
+		btrfs_abort_transaction(trans, root, ret);
 		goto out;
+	}
 
 	if (ret != 0) {
 		btrfs_print_leaf(root, path->nodes[0]);
@@ -116,13 +120,10 @@
 	return ret;
 }
 
-int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
-		      *root, struct btrfs_key *key, struct btrfs_root_item
-		      *item)
+int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		      struct btrfs_key *key, struct btrfs_root_item *item)
 {
-	int ret;
-	ret = btrfs_insert_item(trans, root, key, item, sizeof(*item));
-	return ret;
+	return btrfs_insert_item(trans, root, key, item, sizeof(*item));
 }
 
 /*
@@ -384,6 +385,8 @@
  *
  * For a back ref the root_id is the id of the subvol or snapshot and
  * ref_id is the id of the tree referencing it.
+ *
+ * Will return 0, -ENOMEM, or anything from the CoW path
  */
 int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *tree_root,
@@ -407,7 +410,11 @@
 again:
 	ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
 				      sizeof(*ref) + name_len);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, tree_root, ret);
+		btrfs_free_path(path);
+		return ret;
+	}
 
 	leaf = path->nodes[0];
 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 390e710..90acc82 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -36,37 +36,30 @@
  * Future enhancements:
  *  - In case an unrepairable extent is encountered, track which files are
  *    affected and report them
- *  - In case of a read error on files with nodatasum, map the file and read
- *    the extent to trigger a writeback of the good copy
  *  - track and record media errors, throw out bad devices
  *  - add a mode to also read unallocated space
  */
 
-struct scrub_bio;
-struct scrub_page;
+struct scrub_block;
 struct scrub_dev;
-static void scrub_bio_end_io(struct bio *bio, int err);
-static void scrub_checksum(struct btrfs_work *work);
-static int scrub_checksum_data(struct scrub_dev *sdev,
-			       struct scrub_page *spag, void *buffer);
-static int scrub_checksum_tree_block(struct scrub_dev *sdev,
-				     struct scrub_page *spag, u64 logical,
-				     void *buffer);
-static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
-static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
-static void scrub_fixup_end_io(struct bio *bio, int err);
-static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
-			  struct page *page);
-static void scrub_fixup(struct scrub_bio *sbio, int ix);
 
 #define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
 #define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */
+#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
 
 struct scrub_page {
+	struct scrub_block	*sblock;
+	struct page		*page;
+	struct block_device	*bdev;
 	u64			flags;  /* extent flags */
 	u64			generation;
-	int			mirror_num;
-	int			have_csum;
+	u64			logical;
+	u64			physical;
+	struct {
+		unsigned int	mirror_num:8;
+		unsigned int	have_csum:1;
+		unsigned int	io_error:1;
+	};
 	u8			csum[BTRFS_CSUM_SIZE];
 };
 
@@ -77,12 +70,25 @@
 	int			err;
 	u64			logical;
 	u64			physical;
-	struct scrub_page	spag[SCRUB_PAGES_PER_BIO];
-	u64			count;
+	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
+	int			page_count;
 	int			next_free;
 	struct btrfs_work	work;
 };
 
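+/*
+ * A scrub_block groups the pages that back one checksummed unit (a data
+ * sector or a tree node/leaf). It is reference counted and freed once the
+ * last reference is dropped in scrub_block_put().
+ */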
+struct scrub_block {
+	struct scrub_page	pagev[SCRUB_MAX_PAGES_PER_BLOCK];
+	int			page_count;
+	atomic_t		outstanding_pages;
+	atomic_t		ref_count; /* free mem on transition to zero */
+	struct scrub_dev	*sdev;
+	struct {
+		unsigned int	header_error:1;
+		unsigned int	checksum_error:1;
+		unsigned int	no_io_error_seen:1;
+	};
+};
+
 struct scrub_dev {
 	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
 	struct btrfs_device	*dev;
@@ -96,6 +102,10 @@
 	struct list_head	csum_list;
 	atomic_t		cancel_req;
 	int			readonly;
+	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
+	u32			sectorsize;
+	u32			nodesize;
+	u32			leafsize;
 	/*
 	 * statistics
 	 */
@@ -124,6 +134,43 @@
 	int			scratch_bufsize;
 };
 
+
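+/* forward declarations for the page/block based scrub and repair path */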
+static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
+static int scrub_setup_recheck_block(struct scrub_dev *sdev,
+				     struct btrfs_mapping_tree *map_tree,
+				     u64 length, u64 logical,
+				     struct scrub_block *sblock);
+static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
+			       struct scrub_block *sblock, int is_metadata,
+			       int have_csum, u8 *csum, u64 generation,
+			       u16 csum_size);
+static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
+					 struct scrub_block *sblock,
+					 int is_metadata, int have_csum,
+					 const u8 *csum, u64 generation,
+					 u16 csum_size);
+static void scrub_complete_bio_end_io(struct bio *bio, int err);
+static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
+					     struct scrub_block *sblock_good,
+					     int force_write);
+static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
+					    struct scrub_block *sblock_good,
+					    int page_num, int force_write);
+static int scrub_checksum_data(struct scrub_block *sblock);
+static int scrub_checksum_tree_block(struct scrub_block *sblock);
+static int scrub_checksum_super(struct scrub_block *sblock);
+static void scrub_block_get(struct scrub_block *sblock);
+static void scrub_block_put(struct scrub_block *sblock);
+static int scrub_add_page_to_bio(struct scrub_dev *sdev,
+				 struct scrub_page *spage);
+static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
+		       u64 physical, u64 flags, u64 gen, int mirror_num,
+		       u8 *csum, int force);
+static void scrub_bio_end_io(struct bio *bio, int err);
+static void scrub_bio_end_io_worker(struct btrfs_work *work);
+static void scrub_block_complete(struct scrub_block *sblock);
+
+
 static void scrub_free_csums(struct scrub_dev *sdev)
 {
 	while (!list_empty(&sdev->csum_list)) {
@@ -135,23 +182,6 @@
 	}
 }
 
-static void scrub_free_bio(struct bio *bio)
-{
-	int i;
-	struct page *last_page = NULL;
-
-	if (!bio)
-		return;
-
-	for (i = 0; i < bio->bi_vcnt; ++i) {
-		if (bio->bi_io_vec[i].bv_page == last_page)
-			continue;
-		last_page = bio->bi_io_vec[i].bv_page;
-		__free_page(last_page);
-	}
-	bio_put(bio);
-}
-
 static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
 {
 	int i;
@@ -159,13 +189,23 @@
 	if (!sdev)
 		return;
 
+	/* this can happen when scrub is cancelled */
+	if (sdev->curr != -1) {
+		struct scrub_bio *sbio = sdev->bios[sdev->curr];
+
+		for (i = 0; i < sbio->page_count; i++) {
+			BUG_ON(!sbio->pagev[i]);
+			BUG_ON(!sbio->pagev[i]->page);
+			scrub_block_put(sbio->pagev[i]->sblock);
+		}
+		bio_put(sbio->bio);
+	}
+
 	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
 		struct scrub_bio *sbio = sdev->bios[i];
 
 		if (!sbio)
 			break;
-
-		scrub_free_bio(sbio->bio);
 		kfree(sbio);
 	}
 
@@ -179,11 +219,16 @@
 	struct scrub_dev *sdev;
 	int		i;
 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+	int pages_per_bio;
 
+	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
+			      bio_get_nr_vecs(dev->bdev));
 	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
 	if (!sdev)
 		goto nomem;
 	sdev->dev = dev;
+	sdev->pages_per_bio = pages_per_bio;
+	sdev->curr = -1;
 	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
 		struct scrub_bio *sbio;
 
@@ -194,8 +239,8 @@
 
 		sbio->index = i;
 		sbio->sdev = sdev;
-		sbio->count = 0;
-		sbio->work.func = scrub_checksum;
+		sbio->page_count = 0;
+		sbio->work.func = scrub_bio_end_io_worker;
 
 		if (i != SCRUB_BIOS_PER_DEV-1)
 			sdev->bios[i]->next_free = i + 1;
@@ -203,7 +248,9 @@
 			sdev->bios[i]->next_free = -1;
 	}
 	sdev->first_free = 0;
-	sdev->curr = -1;
+	sdev->nodesize = dev->dev_root->nodesize;
+	sdev->leafsize = dev->dev_root->leafsize;
+	sdev->sectorsize = dev->dev_root->sectorsize;
 	atomic_set(&sdev->in_flight, 0);
 	atomic_set(&sdev->fixup_cnt, 0);
 	atomic_set(&sdev->cancel_req, 0);
@@ -294,10 +341,9 @@
 	return 0;
 }
 
-static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
-				int ix)
+static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 {
-	struct btrfs_device *dev = sbio->sdev->dev;
+	struct btrfs_device *dev = sblock->sdev->dev;
 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 	struct btrfs_path *path;
 	struct btrfs_key found_key;
@@ -316,8 +362,9 @@
 
 	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
 	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
-	swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
-	swarn.logical = sbio->logical + ix * PAGE_SIZE;
+	BUG_ON(sblock->page_count < 1);
+	swarn.sector = (sblock->pagev[0].physical) >> 9;
+	swarn.logical = sblock->pagev[0].logical;
 	swarn.errstr = errstr;
 	swarn.dev = dev;
 	swarn.msg_bufsize = bufsize;
@@ -342,7 +389,8 @@
 		do {
 			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 							&ref_root, &ref_level);
-			printk(KERN_WARNING "%s at logical %llu on dev %s, "
+			printk(KERN_WARNING
+				"btrfs: %s at logical %llu on dev %s, "
 				"sector %llu: metadata %s (level %d) in tree "
 				"%llu\n", errstr, swarn.logical, dev->name,
 				(unsigned long long)swarn.sector,
@@ -352,8 +400,8 @@
 		} while (ret != 1);
 	} else {
 		swarn.path = path;
-		iterate_extent_inodes(fs_info, path, found_key.objectid,
-					extent_item_pos,
+		iterate_extent_inodes(fs_info, found_key.objectid,
+					extent_item_pos, 1,
 					scrub_print_warning_inode, &swarn);
 	}
 
@@ -531,9 +579,9 @@
 		spin_lock(&sdev->stat_lock);
 		++sdev->stat.uncorrectable_errors;
 		spin_unlock(&sdev->stat_lock);
-		printk_ratelimited(KERN_ERR "btrfs: unable to fixup "
-					"(nodatasum) error at logical %llu\n",
-					fixup->logical);
+		printk_ratelimited(KERN_ERR
+			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
+			(unsigned long long)fixup->logical, sdev->dev->name);
 	}
 
 	btrfs_free_path(path);
@@ -550,91 +598,168 @@
 }
 
 /*
- * scrub_recheck_error gets called when either verification of the page
- * failed or the bio failed to read, e.g. with EIO. In the latter case,
- * recheck_error gets called for every page in the bio, even though only
- * one may be bad
+ * scrub_handle_errored_block gets called when either verification of the
+ * pages failed or the bio failed to read, e.g. with EIO. In the latter
+ * case, this function handles all pages in the bio, even though only one
+ * may be bad.
+ * The goal of this function is to repair the errored block by using the
+ * contents of one of the mirrors.
  */
-static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
+static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 {
-	struct scrub_dev *sdev = sbio->sdev;
-	u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
-	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
-					DEFAULT_RATELIMIT_BURST);
-
-	if (sbio->err) {
-		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
-				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
-			if (scrub_fixup_check(sbio, ix) == 0)
-				return 0;
-		}
-		if (__ratelimit(&_rs))
-			scrub_print_warning("i/o error", sbio, ix);
-	} else {
-		if (__ratelimit(&_rs))
-			scrub_print_warning("checksum error", sbio, ix);
-	}
-
-	spin_lock(&sdev->stat_lock);
-	++sdev->stat.read_errors;
-	spin_unlock(&sdev->stat_lock);
-
-	scrub_fixup(sbio, ix);
-	return 1;
-}
-
-static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
-{
-	int ret = 1;
-	struct page *page;
-	void *buffer;
-	u64 flags = sbio->spag[ix].flags;
-
-	page = sbio->bio->bi_io_vec[ix].bv_page;
-	buffer = kmap_atomic(page);
-	if (flags & BTRFS_EXTENT_FLAG_DATA) {
-		ret = scrub_checksum_data(sbio->sdev,
-					  sbio->spag + ix, buffer);
-	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
-		ret = scrub_checksum_tree_block(sbio->sdev,
-						sbio->spag + ix,
-						sbio->logical + ix * PAGE_SIZE,
-						buffer);
-	} else {
-		WARN_ON(1);
-	}
-	kunmap_atomic(buffer);
-
-	return ret;
-}
-
-static void scrub_fixup_end_io(struct bio *bio, int err)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
-static void scrub_fixup(struct scrub_bio *sbio, int ix)
-{
-	struct scrub_dev *sdev = sbio->sdev;
-	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
-	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
-	struct btrfs_bio *bbio = NULL;
-	struct scrub_fixup_nodatasum *fixup;
-	u64 logical = sbio->logical + ix * PAGE_SIZE;
+	struct scrub_dev *sdev = sblock_to_check->sdev;
+	struct btrfs_fs_info *fs_info;
 	u64 length;
-	int i;
+	u64 logical;
+	u64 generation;
+	unsigned int failed_mirror_index;
+	unsigned int is_metadata;
+	unsigned int have_csum;
+	u8 *csum;
+	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
+	struct scrub_block *sblock_bad;
 	int ret;
-	DECLARE_COMPLETION_ONSTACK(complete);
+	int mirror_index;
+	int page_num;
+	int success;
+	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
 
-	if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
-	    (sbio->spag[ix].have_csum == 0)) {
-		fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
-		if (!fixup)
-			goto uncorrectable;
-		fixup->sdev = sdev;
-		fixup->logical = logical;
-		fixup->root = fs_info->extent_root;
-		fixup->mirror_num = sbio->spag[ix].mirror_num;
+	BUG_ON(sblock_to_check->page_count < 1);
+	fs_info = sdev->dev->dev_root->fs_info;
+	length = sblock_to_check->page_count * PAGE_SIZE;
+	logical = sblock_to_check->pagev[0].logical;
+	generation = sblock_to_check->pagev[0].generation;
+	BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
+	failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
+	is_metadata = !(sblock_to_check->pagev[0].flags &
+			BTRFS_EXTENT_FLAG_DATA);
+	have_csum = sblock_to_check->pagev[0].have_csum;
+	csum = sblock_to_check->pagev[0].csum;
+
+	/*
+	 * read all mirrors one after the other. This includes re-reading
+	 * the extent or metadata block that failed (the one that caused
+	 * this fixup code to be called) one more time, page by page this
+	 * time, in order to know which pages caused I/O errors and which
+	 * ones are good (for all mirrors).
+	 * The goal is to handle the situation when more than one
+	 * mirror contains I/O errors, but the errors do not
+	 * overlap, i.e. the data can be repaired by selecting the
+	 * pages from those mirrors without I/O error on the
+	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
+	 * would be that mirror #1 has an I/O error on the first page,
+	 * the second page is good, and mirror #2 has an I/O error on
+	 * the second page, but the first page is good.
+	 * Then the first page of the first mirror can be repaired by
+	 * taking the first page of the second mirror, and the
+	 * second page of the second mirror can be repaired by
+	 * copying the contents of the 2nd page of the 1st mirror.
+	 * One more note: if the pages of one mirror contain I/O
+	 * errors, the checksum cannot be verified. In order to get
+	 * the best data for repairing, the first attempt is to find
+	 * a mirror without I/O errors and with a validated checksum.
+	 * Only if this is not possible are the pages picked from
+	 * mirrors with I/O errors, without considering the checksum.
+	 * If the latter is the case, at the end, the checksum of the
+	 * repaired area is verified in order to correctly maintain
+	 * the statistics.
+	 */
+
+	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
+				     sizeof(*sblocks_for_recheck),
+				     GFP_NOFS);
+	if (!sblocks_for_recheck) {
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.malloc_errors++;
+		sdev->stat.read_errors++;
+		sdev->stat.uncorrectable_errors++;
+		spin_unlock(&sdev->stat_lock);
+		goto out;
+	}
+
+	/* setup the context, map the logical blocks and alloc the pages */
+	ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
+					logical, sblocks_for_recheck);
+	if (ret) {
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.read_errors++;
+		sdev->stat.uncorrectable_errors++;
+		spin_unlock(&sdev->stat_lock);
+		goto out;
+	}
+	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
+	sblock_bad = sblocks_for_recheck + failed_mirror_index;
+
+	/* build and submit the bios for the failed mirror, check checksums */
+	ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
+				  csum, generation, sdev->csum_size);
+	if (ret) {
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.read_errors++;
+		sdev->stat.uncorrectable_errors++;
+		spin_unlock(&sdev->stat_lock);
+		goto out;
+	}
+
+	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
+	    sblock_bad->no_io_error_seen) {
+		/*
+		 * the error disappeared after reading page by page, or
+		 * the area was part of a huge bio and other parts of the
+		 * bio caused I/O errors, or the block layer merged several
+		 * read requests into one and the error is caused by a
+		 * different bio (usually one of the latter two cases is
+		 * the cause)
+		 */
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.unverified_errors++;
+		spin_unlock(&sdev->stat_lock);
+
+		goto out;
+	}
+
+	if (!sblock_bad->no_io_error_seen) {
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.read_errors++;
+		spin_unlock(&sdev->stat_lock);
+		if (__ratelimit(&_rs))
+			scrub_print_warning("i/o error", sblock_to_check);
+	} else if (sblock_bad->checksum_error) {
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.csum_errors++;
+		spin_unlock(&sdev->stat_lock);
+		if (__ratelimit(&_rs))
+			scrub_print_warning("checksum error", sblock_to_check);
+	} else if (sblock_bad->header_error) {
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.verify_errors++;
+		spin_unlock(&sdev->stat_lock);
+		if (__ratelimit(&_rs))
+			scrub_print_warning("checksum/header error",
+					    sblock_to_check);
+	}
+
+	if (sdev->readonly)
+		goto did_not_correct_error;
+
+	if (!is_metadata && !have_csum) {
+		struct scrub_fixup_nodatasum *fixup_nodatasum;
+
+		/*
+		 * !is_metadata and !have_csum: this means that the data
+		 * might not be COW'ed and might be modified
+		 * concurrently. The general strategy of working on the
+		 * commit root does not help in the case when COW is not
+		 * used.
+		 */
+		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
+		if (!fixup_nodatasum)
+			goto did_not_correct_error;
+		fixup_nodatasum->sdev = sdev;
+		fixup_nodatasum->logical = logical;
+		fixup_nodatasum->root = fs_info->extent_root;
+		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
 		/*
 		 * increment scrubs_running to prevent cancel requests from
 		 * completing as long as a fixup worker is running. we must also
@@ -649,235 +774,528 @@
 		atomic_inc(&fs_info->scrubs_paused);
 		mutex_unlock(&fs_info->scrub_lock);
 		atomic_inc(&sdev->fixup_cnt);
-		fixup->work.func = scrub_fixup_nodatasum;
-		btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work);
-		return;
-	}
-
-	length = PAGE_SIZE;
-	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
-			      &bbio, 0);
-	if (ret || !bbio || length < PAGE_SIZE) {
-		printk(KERN_ERR
-		       "scrub_fixup: btrfs_map_block failed us for %llu\n",
-		       (unsigned long long)logical);
-		WARN_ON(1);
-		kfree(bbio);
-		return;
-	}
-
-	if (bbio->num_stripes == 1)
-		/* there aren't any replicas */
-		goto uncorrectable;
-
-	/*
-	 * first find a good copy
-	 */
-	for (i = 0; i < bbio->num_stripes; ++i) {
-		if (i + 1 == sbio->spag[ix].mirror_num)
-			continue;
-
-		if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev,
-				   bbio->stripes[i].physical >> 9,
-				   sbio->bio->bi_io_vec[ix].bv_page)) {
-			/* I/O-error, this is not a good copy */
-			continue;
-		}
-
-		if (scrub_fixup_check(sbio, ix) == 0)
-			break;
-	}
-	if (i == bbio->num_stripes)
-		goto uncorrectable;
-
-	if (!sdev->readonly) {
-		/*
-		 * bi_io_vec[ix].bv_page now contains good data, write it back
-		 */
-		if (scrub_fixup_io(WRITE, sdev->dev->bdev,
-				   (sbio->physical + ix * PAGE_SIZE) >> 9,
-				   sbio->bio->bi_io_vec[ix].bv_page)) {
-			/* I/O-error, writeback failed, give up */
-			goto uncorrectable;
-		}
-	}
-
-	kfree(bbio);
-	spin_lock(&sdev->stat_lock);
-	++sdev->stat.corrected_errors;
-	spin_unlock(&sdev->stat_lock);
-
-	printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n",
-			       (unsigned long long)logical);
-	return;
-
-uncorrectable:
-	kfree(bbio);
-	spin_lock(&sdev->stat_lock);
-	++sdev->stat.uncorrectable_errors;
-	spin_unlock(&sdev->stat_lock);
-
-	printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at "
-				"logical %llu\n", (unsigned long long)logical);
-}
-
-static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
-			 struct page *page)
-{
-	struct bio *bio = NULL;
-	int ret;
-	DECLARE_COMPLETION_ONSTACK(complete);
-
-	bio = bio_alloc(GFP_NOFS, 1);
-	bio->bi_bdev = bdev;
-	bio->bi_sector = sector;
-	bio_add_page(bio, page, PAGE_SIZE, 0);
-	bio->bi_end_io = scrub_fixup_end_io;
-	bio->bi_private = &complete;
-	btrfsic_submit_bio(rw, bio);
-
-	/* this will also unplug the queue */
-	wait_for_completion(&complete);
-
-	ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
-	bio_put(bio);
-	return ret;
-}
-
-static void scrub_bio_end_io(struct bio *bio, int err)
-{
-	struct scrub_bio *sbio = bio->bi_private;
-	struct scrub_dev *sdev = sbio->sdev;
-	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
-
-	sbio->err = err;
-	sbio->bio = bio;
-
-	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
-}
-
-static void scrub_checksum(struct btrfs_work *work)
-{
-	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
-	struct scrub_dev *sdev = sbio->sdev;
-	struct page *page;
-	void *buffer;
-	int i;
-	u64 flags;
-	u64 logical;
-	int ret;
-
-	if (sbio->err) {
-		ret = 0;
-		for (i = 0; i < sbio->count; ++i)
-			ret |= scrub_recheck_error(sbio, i);
-		if (!ret) {
-			spin_lock(&sdev->stat_lock);
-			++sdev->stat.unverified_errors;
-			spin_unlock(&sdev->stat_lock);
-		}
-
-		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
-		sbio->bio->bi_phys_segments = 0;
-		sbio->bio->bi_idx = 0;
-
-		for (i = 0; i < sbio->count; i++) {
-			struct bio_vec *bi;
-			bi = &sbio->bio->bi_io_vec[i];
-			bi->bv_offset = 0;
-			bi->bv_len = PAGE_SIZE;
-		}
+		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
+		btrfs_queue_worker(&fs_info->scrub_workers,
+				   &fixup_nodatasum->work);
 		goto out;
 	}
-	for (i = 0; i < sbio->count; ++i) {
-		page = sbio->bio->bi_io_vec[i].bv_page;
-		buffer = kmap_atomic(page);
-		flags = sbio->spag[i].flags;
-		logical = sbio->logical + i * PAGE_SIZE;
-		ret = 0;
-		if (flags & BTRFS_EXTENT_FLAG_DATA) {
-			ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
-		} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
-			ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
-							logical, buffer);
-		} else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
-			BUG_ON(i);
-			(void)scrub_checksum_super(sbio, buffer);
-		} else {
-			WARN_ON(1);
+
+	/*
+	 * now build and submit the bios for the other mirrors, check
+	 * checksums
+	 */
+	for (mirror_index = 0;
+	     mirror_index < BTRFS_MAX_MIRRORS &&
+	     sblocks_for_recheck[mirror_index].page_count > 0;
+	     mirror_index++) {
+		if (mirror_index == failed_mirror_index)
+			continue;
+
+		/* build and submit the bios, check checksums */
+		ret = scrub_recheck_block(fs_info,
+					  sblocks_for_recheck + mirror_index,
+					  is_metadata, have_csum, csum,
+					  generation, sdev->csum_size);
+		if (ret)
+			goto did_not_correct_error;
+	}
+
+	/*
+	 * first try to pick the mirror which is completely without I/O
+	 * errors and also does not have a checksum error.
+	 * If one is found, and if a checksum is present, the full block
+	 * that is known to contain an error is rewritten. Afterwards
+	 * the block is known to be corrected.
+	 * If a mirror is found which is completely correct, and no
+	 * checksum is present, only those pages are rewritten that had
+	 * an I/O error in the block to be repaired, since it cannot be
+	 * determined which copy of the other pages is better (and it
+	 * could happen otherwise that a correct page would be
+	 * overwritten by a bad one).
+	 */
+	for (mirror_index = 0;
+	     mirror_index < BTRFS_MAX_MIRRORS &&
+	     sblocks_for_recheck[mirror_index].page_count > 0;
+	     mirror_index++) {
+		struct scrub_block *sblock_other = sblocks_for_recheck +
+						   mirror_index;
+
+		if (!sblock_other->header_error &&
+		    !sblock_other->checksum_error &&
+		    sblock_other->no_io_error_seen) {
+			int force_write = is_metadata || have_csum;
+
+			ret = scrub_repair_block_from_good_copy(sblock_bad,
+								sblock_other,
+								force_write);
+			if (0 == ret)
+				goto corrected_error;
 		}
-		kunmap_atomic(buffer);
-		if (ret) {
-			ret = scrub_recheck_error(sbio, i);
-			if (!ret) {
-				spin_lock(&sdev->stat_lock);
-				++sdev->stat.unverified_errors;
-				spin_unlock(&sdev->stat_lock);
+	}
+
+	/*
+	 * in case of I/O errors in the area that is supposed to be
+	 * repaired, continue by picking good copies of those pages.
+	 * Select the good pages from mirrors to rewrite bad pages from
+	 * the area to fix. Afterwards verify the checksum of the block
+	 * that is supposed to be repaired. This verification step is
+	 * only done for the purpose of statistics counting and for the
+	 * final scrub report on whether errors remain.
+	 * A perfect algorithm could make use of the checksum and try
+	 * all possible combinations of pages from the different mirrors
+	 * until the checksum verification succeeds. For example, when
+	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
+	 * of mirror #2 is readable but the final checksum test fails,
+	 * then the 2nd page of mirror #3 could be tried to see whether
+	 * the final checksum now succeeds. But this would be a rare
+	 * exception and is therefore not implemented. At least the good
+	 * copy is never overwritten.
+	 * A more useful improvement would be to pick the sectors
+	 * without I/O error based on sector sizes (512 bytes on legacy
+	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
+	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
+	 * mirror could be repaired by taking 512 bytes of a different
+	 * mirror, even if other 512-byte sectors in the same PAGE_SIZE
+	 */
+
+	/* can only fix I/O errors from here on */
+	if (sblock_bad->no_io_error_seen)
+		goto did_not_correct_error;
+
+	success = 1;
+	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
+		struct scrub_page *page_bad = sblock_bad->pagev + page_num;
+
+		if (!page_bad->io_error)
+			continue;
+
+		for (mirror_index = 0;
+		     mirror_index < BTRFS_MAX_MIRRORS &&
+		     sblocks_for_recheck[mirror_index].page_count > 0;
+		     mirror_index++) {
+			struct scrub_block *sblock_other = sblocks_for_recheck +
+							   mirror_index;
+			struct scrub_page *page_other = sblock_other->pagev +
+							page_num;
+
+			if (!page_other->io_error) {
+				ret = scrub_repair_page_from_good_copy(
+					sblock_bad, sblock_other, page_num, 0);
+				if (0 == ret) {
+					page_bad->io_error = 0;
+					break; /* succeeded for this page */
+				}
 			}
 		}
+
+		if (page_bad->io_error) {
+			/* did not find a mirror to copy the page from */
+			success = 0;
+		}
+	}
+
+	if (success) {
+		if (is_metadata || have_csum) {
+			/*
+			 * need to verify the checksum now that all
+			 * sectors on disk are repaired (the write
+			 * request for data to be repaired is on its way).
+			 * Just be lazy and use scrub_recheck_block()
+			 * which re-reads the data before the checksum
+			 * is verified, but most likely the data comes out
+			 * of the page cache.
+			 */
+			ret = scrub_recheck_block(fs_info, sblock_bad,
+						  is_metadata, have_csum, csum,
+						  generation, sdev->csum_size);
+			if (!ret && !sblock_bad->header_error &&
+			    !sblock_bad->checksum_error &&
+			    sblock_bad->no_io_error_seen)
+				goto corrected_error;
+			else
+				goto did_not_correct_error;
+		} else {
+corrected_error:
+			spin_lock(&sdev->stat_lock);
+			sdev->stat.corrected_errors++;
+			spin_unlock(&sdev->stat_lock);
+			printk_ratelimited(KERN_ERR
+				"btrfs: fixed up error at logical %llu on dev %s\n",
+				(unsigned long long)logical, sdev->dev->name);
+		}
+	} else {
+did_not_correct_error:
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.uncorrectable_errors++;
+		spin_unlock(&sdev->stat_lock);
+		printk_ratelimited(KERN_ERR
+			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
+			(unsigned long long)logical, sdev->dev->name);
 	}
 
 out:
-	scrub_free_bio(sbio->bio);
-	sbio->bio = NULL;
-	spin_lock(&sdev->list_lock);
-	sbio->next_free = sdev->first_free;
-	sdev->first_free = sbio->index;
-	spin_unlock(&sdev->list_lock);
-	atomic_dec(&sdev->in_flight);
-	wake_up(&sdev->list_wait);
+	if (sblocks_for_recheck) {
+		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
+		     mirror_index++) {
+			struct scrub_block *sblock = sblocks_for_recheck +
+						     mirror_index;
+			int page_index;
+
+			for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
+			     page_index++)
+				if (sblock->pagev[page_index].page)
+					__free_page(
+						sblock->pagev[page_index].page);
+		}
+		kfree(sblocks_for_recheck);
+	}
+
+	return 0;
 }
 
-static int scrub_checksum_data(struct scrub_dev *sdev,
-			       struct scrub_page *spag, void *buffer)
+static int scrub_setup_recheck_block(struct scrub_dev *sdev,
+				     struct btrfs_mapping_tree *map_tree,
+				     u64 length, u64 logical,
+				     struct scrub_block *sblocks_for_recheck)
 {
+	int page_index;
+	int mirror_index;
+	int ret;
+
+	/*
+	 * note: the three members sdev, ref_count and outstanding_pages
+	 * are not used (and not set) in the blocks that are used for
+	 * the recheck procedure
+	 */
+
+	page_index = 0;
+	while (length > 0) {
+		u64 sublen = min_t(u64, length, PAGE_SIZE);
+		u64 mapped_length = sublen;
+		struct btrfs_bio *bbio = NULL;
+
+		/*
+		 * with a length of PAGE_SIZE, each returned stripe
+		 * represents one mirror
+		 */
+		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
+				      &bbio, 0);
+		if (ret || !bbio || mapped_length < sublen) {
+			kfree(bbio);
+			return -EIO;
+		}
+
+		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
+		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
+		     mirror_index++) {
+			struct scrub_block *sblock;
+			struct scrub_page *page;
+
+			if (mirror_index >= BTRFS_MAX_MIRRORS)
+				continue;
+
+			sblock = sblocks_for_recheck + mirror_index;
+			page = sblock->pagev + page_index;
+			page->logical = logical;
+			page->physical = bbio->stripes[mirror_index].physical;
+			page->bdev = bbio->stripes[mirror_index].dev->bdev;
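+			/* scrub mirror numbers are 1-based */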
+			page->mirror_num = mirror_index + 1;
+			page->page = alloc_page(GFP_NOFS);
+			if (!page->page) {
+				spin_lock(&sdev->stat_lock);
+				sdev->stat.malloc_errors++;
+				spin_unlock(&sdev->stat_lock);
+				return -ENOMEM;
+			}
+			sblock->page_count++;
+		}
+		kfree(bbio);
+		length -= sublen;
+		logical += sublen;
+		page_index++;
+	}
+
+	return 0;
+}
+
+/*
+ * this function will check the on-disk data for checksum errors, header
+ * errors and read I/O errors. If any I/O errors happen, the exact pages
+ * with errors are marked as bad. The goal is to enable scrub to take
+ * error-free pages from all the mirrors so that the pages with errors in
+ * the mirror just handled can be repaired.
+ */
+static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
+			       struct scrub_block *sblock, int is_metadata,
+			       int have_csum, u8 *csum, u64 generation,
+			       u16 csum_size)
+{
+	int page_num;
+
+	sblock->no_io_error_seen = 1;
+	sblock->header_error = 0;
+	sblock->checksum_error = 0;
+
+	for (page_num = 0; page_num < sblock->page_count; page_num++) {
+		struct bio *bio;
+		int ret;
+		struct scrub_page *page = sblock->pagev + page_num;
+		DECLARE_COMPLETION_ONSTACK(complete);
+
+		BUG_ON(!page->page);
+		bio = bio_alloc(GFP_NOFS, 1);
+		bio->bi_bdev = page->bdev;
+		bio->bi_sector = page->physical >> 9;
+		bio->bi_end_io = scrub_complete_bio_end_io;
+		bio->bi_private = &complete;
+
+		ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
+		if (PAGE_SIZE != ret) {
+			bio_put(bio);
+			return -EIO;
+		}
+		btrfsic_submit_bio(READ, bio);
+
+		/* this will also unplug the queue */
+		wait_for_completion(&complete);
+
+		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+			sblock->no_io_error_seen = 0;
+		bio_put(bio);
+	}
+
+	if (sblock->no_io_error_seen)
+		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
+					     have_csum, csum, generation,
+					     csum_size);
+
+	return 0;
+}
+
+static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
+					 struct scrub_block *sblock,
+					 int is_metadata, int have_csum,
+					 const u8 *csum, u64 generation,
+					 u16 csum_size)
+{
+	int page_num;
+	u8 calculated_csum[BTRFS_CSUM_SIZE];
+	u32 crc = ~(u32)0;
+	struct btrfs_root *root = fs_info->extent_root;
+	void *mapped_buffer;
+
+	BUG_ON(!sblock->pagev[0].page);
+	if (is_metadata) {
+		struct btrfs_header *h;
+
+		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
+		h = (struct btrfs_header *)mapped_buffer;
+
+		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
+		    generation != le64_to_cpu(h->generation) ||
+		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
+		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+			   BTRFS_UUID_SIZE))
+			sblock->header_error = 1;
+		csum = h->csum;
+	} else {
+		if (!have_csum)
+			return;
+
+		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
+	}
+
+	for (page_num = 0;;) {
+		if (page_num == 0 && is_metadata)
+			crc = btrfs_csum_data(root,
+				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
+				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
+		else
+			crc = btrfs_csum_data(root, mapped_buffer, crc,
+					      PAGE_SIZE);
+
+		kunmap_atomic(mapped_buffer);
+		page_num++;
+		if (page_num >= sblock->page_count)
+			break;
+		BUG_ON(!sblock->pagev[page_num].page);
+
+		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
+	}
+
+	btrfs_csum_final(crc, calculated_csum);
+	if (memcmp(calculated_csum, csum, csum_size))
+		sblock->checksum_error = 1;
+}
+
+static void scrub_complete_bio_end_io(struct bio *bio, int err)
+{
+	complete((struct completion *)bio->bi_private);
+}
+
+static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
+					     struct scrub_block *sblock_good,
+					     int force_write)
+{
+	int page_num;
+	int ret = 0;
+
+	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
+		int ret_sub;
+
+		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
+							   sblock_good,
+							   page_num,
+							   force_write);
+		if (ret_sub)
+			ret = ret_sub;
+	}
+
+	return ret;
+}
+
+static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
+					    struct scrub_block *sblock_good,
+					    int page_num, int force_write)
+{
+	struct scrub_page *page_bad = sblock_bad->pagev + page_num;
+	struct scrub_page *page_good = sblock_good->pagev + page_num;
+
+	BUG_ON(sblock_bad->pagev[page_num].page == NULL);
+	BUG_ON(sblock_good->pagev[page_num].page == NULL);
+	if (force_write || sblock_bad->header_error ||
+	    sblock_bad->checksum_error || page_bad->io_error) {
+		struct bio *bio;
+		int ret;
+		DECLARE_COMPLETION_ONSTACK(complete);
+
+		bio = bio_alloc(GFP_NOFS, 1);
+		bio->bi_bdev = page_bad->bdev;
+		bio->bi_sector = page_bad->physical >> 9;
+		bio->bi_end_io = scrub_complete_bio_end_io;
+		bio->bi_private = &complete;
+
+		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
+		if (PAGE_SIZE != ret) {
+			bio_put(bio);
+			return -EIO;
+		}
+		btrfsic_submit_bio(WRITE, bio);
+
+		/* this will also unplug the queue */
+		wait_for_completion(&complete);
+		bio_put(bio);
+	}
+
+	return 0;
+}
+
+static void scrub_checksum(struct scrub_block *sblock)
+{
+	u64 flags;
+	int ret;
+
+	BUG_ON(sblock->page_count < 1);
+	flags = sblock->pagev[0].flags;
+	ret = 0;
+	if (flags & BTRFS_EXTENT_FLAG_DATA)
+		ret = scrub_checksum_data(sblock);
+	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+		ret = scrub_checksum_tree_block(sblock);
+	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
+		(void)scrub_checksum_super(sblock);
+	else
+		WARN_ON(1);
+	if (ret)
+		scrub_handle_errored_block(sblock);
+}
+
+static int scrub_checksum_data(struct scrub_block *sblock)
+{
+	struct scrub_dev *sdev = sblock->sdev;
 	u8 csum[BTRFS_CSUM_SIZE];
+	u8 *on_disk_csum;
+	struct page *page;
+	void *buffer;
 	u32 crc = ~(u32)0;
 	int fail = 0;
 	struct btrfs_root *root = sdev->dev->dev_root;
+	u64 len;
+	int index;
 
-	if (!spag->have_csum)
+	BUG_ON(sblock->page_count < 1);
+	if (!sblock->pagev[0].have_csum)
 		return 0;
 
-	crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
+	on_disk_csum = sblock->pagev[0].csum;
+	page = sblock->pagev[0].page;
+	buffer = kmap_atomic(page);
+
+	len = sdev->sectorsize;
+	index = 0;
+	for (;;) {
+		u64 l = min_t(u64, len, PAGE_SIZE);
+
+		crc = btrfs_csum_data(root, buffer, crc, l);
+		kunmap_atomic(buffer);
+		len -= l;
+		if (len == 0)
+			break;
+		index++;
+		BUG_ON(index >= sblock->page_count);
+		BUG_ON(!sblock->pagev[index].page);
+		page = sblock->pagev[index].page;
+		buffer = kmap_atomic(page);
+	}
+
 	btrfs_csum_final(crc, csum);
-	if (memcmp(csum, spag->csum, sdev->csum_size))
+	if (memcmp(csum, on_disk_csum, sdev->csum_size))
 		fail = 1;
 
-	spin_lock(&sdev->stat_lock);
-	++sdev->stat.data_extents_scrubbed;
-	sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
-	if (fail)
+	if (fail) {
+		spin_lock(&sdev->stat_lock);
 		++sdev->stat.csum_errors;
-	spin_unlock(&sdev->stat_lock);
+		spin_unlock(&sdev->stat_lock);
+	}
 
 	return fail;
 }
 
-static int scrub_checksum_tree_block(struct scrub_dev *sdev,
-				     struct scrub_page *spag, u64 logical,
-				     void *buffer)
+static int scrub_checksum_tree_block(struct scrub_block *sblock)
 {
+	struct scrub_dev *sdev = sblock->sdev;
 	struct btrfs_header *h;
 	struct btrfs_root *root = sdev->dev->dev_root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
-	u8 csum[BTRFS_CSUM_SIZE];
+	u8 calculated_csum[BTRFS_CSUM_SIZE];
+	u8 on_disk_csum[BTRFS_CSUM_SIZE];
+	struct page *page;
+	void *mapped_buffer;
+	u64 mapped_size;
+	void *p;
 	u32 crc = ~(u32)0;
 	int fail = 0;
 	int crc_fail = 0;
+	u64 len;
+	int index;
+
+	BUG_ON(sblock->page_count < 1);
+	page = sblock->pagev[0].page;
+	mapped_buffer = kmap_atomic(page);
+	h = (struct btrfs_header *)mapped_buffer;
+	memcpy(on_disk_csum, h->csum, sdev->csum_size);
 
 	/*
 	 * we don't use the getter functions here, as we
 	 * a) don't have an extent buffer and
 	 * b) the page is already kmapped
 	 */
-	h = (struct btrfs_header *)buffer;
 
-	if (logical != le64_to_cpu(h->bytenr))
+	if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
 		++fail;
 
-	if (spag->generation != le64_to_cpu(h->generation))
+	if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
 		++fail;
 
 	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
@@ -887,51 +1305,99 @@
 		   BTRFS_UUID_SIZE))
 		++fail;
 
-	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
-			      PAGE_SIZE - BTRFS_CSUM_SIZE);
-	btrfs_csum_final(crc, csum);
-	if (memcmp(csum, h->csum, sdev->csum_size))
+	BUG_ON(sdev->nodesize != sdev->leafsize);
+	len = sdev->nodesize - BTRFS_CSUM_SIZE;
+	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
+	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
+	index = 0;
+	for (;;) {
+		u64 l = min_t(u64, len, mapped_size);
+
+		crc = btrfs_csum_data(root, p, crc, l);
+		kunmap_atomic(mapped_buffer);
+		len -= l;
+		if (len == 0)
+			break;
+		index++;
+		BUG_ON(index >= sblock->page_count);
+		BUG_ON(!sblock->pagev[index].page);
+		page = sblock->pagev[index].page;
+		mapped_buffer = kmap_atomic(page);
+		mapped_size = PAGE_SIZE;
+		p = mapped_buffer;
+	}
+
+	btrfs_csum_final(crc, calculated_csum);
+	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
 		++crc_fail;
 
-	spin_lock(&sdev->stat_lock);
-	++sdev->stat.tree_extents_scrubbed;
-	sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
-	if (crc_fail)
-		++sdev->stat.csum_errors;
-	if (fail)
-		++sdev->stat.verify_errors;
-	spin_unlock(&sdev->stat_lock);
+	if (crc_fail || fail) {
+		spin_lock(&sdev->stat_lock);
+		if (crc_fail)
+			++sdev->stat.csum_errors;
+		if (fail)
+			++sdev->stat.verify_errors;
+		spin_unlock(&sdev->stat_lock);
+	}
 
 	return fail || crc_fail;
 }
 
-static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
+static int scrub_checksum_super(struct scrub_block *sblock)
 {
 	struct btrfs_super_block *s;
-	u64 logical;
-	struct scrub_dev *sdev = sbio->sdev;
+	struct scrub_dev *sdev = sblock->sdev;
 	struct btrfs_root *root = sdev->dev->dev_root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
-	u8 csum[BTRFS_CSUM_SIZE];
+	u8 calculated_csum[BTRFS_CSUM_SIZE];
+	u8 on_disk_csum[BTRFS_CSUM_SIZE];
+	struct page *page;
+	void *mapped_buffer;
+	u64 mapped_size;
+	void *p;
 	u32 crc = ~(u32)0;
 	int fail = 0;
+	u64 len;
+	int index;
 
-	s = (struct btrfs_super_block *)buffer;
-	logical = sbio->logical;
+	BUG_ON(sblock->page_count < 1);
+	page = sblock->pagev[0].page;
+	mapped_buffer = kmap_atomic(page);
+	s = (struct btrfs_super_block *)mapped_buffer;
+	memcpy(on_disk_csum, s->csum, sdev->csum_size);
 
-	if (logical != le64_to_cpu(s->bytenr))
+	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
 		++fail;
 
-	if (sbio->spag[0].generation != le64_to_cpu(s->generation))
+	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
 		++fail;
 
 	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
 		++fail;
 
-	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
-			      PAGE_SIZE - BTRFS_CSUM_SIZE);
-	btrfs_csum_final(crc, csum);
-	if (memcmp(csum, s->csum, sbio->sdev->csum_size))
+	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
+	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
+	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
+	index = 0;
+	for (;;) {
+		u64 l = min_t(u64, len, mapped_size);
+
+		crc = btrfs_csum_data(root, p, crc, l);
+		kunmap_atomic(mapped_buffer);
+		len -= l;
+		if (len == 0)
+			break;
+		index++;
+		BUG_ON(index >= sblock->page_count);
+		BUG_ON(!sblock->pagev[index].page);
+		page = sblock->pagev[index].page;
+		mapped_buffer = kmap_atomic(page);
+		mapped_size = PAGE_SIZE;
+		p = mapped_buffer;
+	}
+
+	btrfs_csum_final(crc, calculated_csum);
+	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
 		++fail;
 
 	if (fail) {
@@ -948,29 +1414,42 @@
 	return fail;
 }
 
-static int scrub_submit(struct scrub_dev *sdev)
+static void scrub_block_get(struct scrub_block *sblock)
+{
+	atomic_inc(&sblock->ref_count);
+}
+
+static void scrub_block_put(struct scrub_block *sblock)
+{
+	if (atomic_dec_and_test(&sblock->ref_count)) {
+		int i;
+
+		for (i = 0; i < sblock->page_count; i++)
+			if (sblock->pagev[i].page)
+				__free_page(sblock->pagev[i].page);
+		kfree(sblock);
+	}
+}
+
+static void scrub_submit(struct scrub_dev *sdev)
 {
 	struct scrub_bio *sbio;
 
 	if (sdev->curr == -1)
-		return 0;
+		return;
 
 	sbio = sdev->bios[sdev->curr];
-	sbio->err = 0;
 	sdev->curr = -1;
 	atomic_inc(&sdev->in_flight);
 
 	btrfsic_submit_bio(READ, sbio->bio);
-
-	return 0;
 }
 
-static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
-		      u64 physical, u64 flags, u64 gen, int mirror_num,
-		      u8 *csum, int force)
+static int scrub_add_page_to_bio(struct scrub_dev *sdev,
+				 struct scrub_page *spage)
 {
+	struct scrub_block *sblock = spage->sblock;
 	struct scrub_bio *sbio;
-	struct page *page;
 	int ret;
 
 again:
@@ -983,7 +1462,7 @@
 		if (sdev->curr != -1) {
 			sdev->first_free = sdev->bios[sdev->curr]->next_free;
 			sdev->bios[sdev->curr]->next_free = -1;
-			sdev->bios[sdev->curr]->count = 0;
+			sdev->bios[sdev->curr]->page_count = 0;
 			spin_unlock(&sdev->list_lock);
 		} else {
 			spin_unlock(&sdev->list_lock);
@@ -991,62 +1470,200 @@
 		}
 	}
 	sbio = sdev->bios[sdev->curr];
-	if (sbio->count == 0) {
+	if (sbio->page_count == 0) {
 		struct bio *bio;
 
-		sbio->physical = physical;
-		sbio->logical = logical;
-		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
-		if (!bio)
-			return -ENOMEM;
+		sbio->physical = spage->physical;
+		sbio->logical = spage->logical;
+		bio = sbio->bio;
+		if (!bio) {
+			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
+			if (!bio)
+				return -ENOMEM;
+			sbio->bio = bio;
+		}
 
 		bio->bi_private = sbio;
 		bio->bi_end_io = scrub_bio_end_io;
 		bio->bi_bdev = sdev->dev->bdev;
-		bio->bi_sector = sbio->physical >> 9;
+		bio->bi_sector = spage->physical >> 9;
 		sbio->err = 0;
-		sbio->bio = bio;
-	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
-		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
-		ret = scrub_submit(sdev);
-		if (ret)
-			return ret;
-		goto again;
-	}
-	sbio->spag[sbio->count].flags = flags;
-	sbio->spag[sbio->count].generation = gen;
-	sbio->spag[sbio->count].have_csum = 0;
-	sbio->spag[sbio->count].mirror_num = mirror_num;
-
-	page = alloc_page(GFP_NOFS);
-	if (!page)
-		return -ENOMEM;
-
-	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
-	if (!ret) {
-		__free_page(page);
-		ret = scrub_submit(sdev);
-		if (ret)
-			return ret;
+	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
+		   spage->physical ||
+		   sbio->logical + sbio->page_count * PAGE_SIZE !=
+		   spage->logical) {
+		scrub_submit(sdev);
 		goto again;
 	}
 
-	if (csum) {
-		sbio->spag[sbio->count].have_csum = 1;
-		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
+	sbio->pagev[sbio->page_count] = spage;
+	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
+	if (ret != PAGE_SIZE) {
+		if (sbio->page_count < 1) {
+			bio_put(sbio->bio);
+			sbio->bio = NULL;
+			return -EIO;
+		}
+		scrub_submit(sdev);
+		goto again;
 	}
-	++sbio->count;
-	if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
-		int ret;
 
-		ret = scrub_submit(sdev);
-		if (ret)
-			return ret;
-	}
+	scrub_block_get(sblock); /* one for the added page */
+	atomic_inc(&sblock->outstanding_pages);
+	sbio->page_count++;
+	if (sbio->page_count == sdev->pages_per_bio)
+		scrub_submit(sdev);
 
 	return 0;
 }
 
+static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
+		       u64 physical, u64 flags, u64 gen, int mirror_num,
+		       u8 *csum, int force)
+{
+	struct scrub_block *sblock;
+	int index;
+
+	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+	if (!sblock) {
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.malloc_errors++;
+		spin_unlock(&sdev->stat_lock);
+		return -ENOMEM;
+	}
+
+	/* one ref inside this function, plus one for each page later on */
+	atomic_set(&sblock->ref_count, 1);
+	sblock->sdev = sdev;
+	sblock->no_io_error_seen = 1;
+
+	for (index = 0; len > 0; index++) {
+		struct scrub_page *spage = sblock->pagev + index;
+		u64 l = min_t(u64, len, PAGE_SIZE);
+
+		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
+		spage->page = alloc_page(GFP_NOFS);
+		if (!spage->page) {
+			spin_lock(&sdev->stat_lock);
+			sdev->stat.malloc_errors++;
+			spin_unlock(&sdev->stat_lock);
+			while (index > 0) {
+				index--;
+				__free_page(sblock->pagev[index].page);
+			}
+			kfree(sblock);
+			return -ENOMEM;
+		}
+		spage->sblock = sblock;
+		spage->bdev = sdev->dev->bdev;
+		spage->flags = flags;
+		spage->generation = gen;
+		spage->logical = logical;
+		spage->physical = physical;
+		spage->mirror_num = mirror_num;
+		if (csum) {
+			spage->have_csum = 1;
+			memcpy(spage->csum, csum, sdev->csum_size);
+		} else {
+			spage->have_csum = 0;
+		}
+		sblock->page_count++;
+		len -= l;
+		logical += l;
+		physical += l;
+	}
+
+	BUG_ON(sblock->page_count == 0);
+	for (index = 0; index < sblock->page_count; index++) {
+		struct scrub_page *spage = sblock->pagev + index;
+		int ret;
+
+		ret = scrub_add_page_to_bio(sdev, spage);
+		if (ret) {
+			scrub_block_put(sblock);
+			return ret;
+		}
+	}
+
+	if (force)
+		scrub_submit(sdev);
+
+	/* last one frees, either here or in bio completion for last page */
+	scrub_block_put(sblock);
+	return 0;
+}
+
+static void scrub_bio_end_io(struct bio *bio, int err)
+{
+	struct scrub_bio *sbio = bio->bi_private;
+	struct scrub_dev *sdev = sbio->sdev;
+	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+
+	sbio->err = err;
+	sbio->bio = bio;
+
+	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+}
+
+static void scrub_bio_end_io_worker(struct btrfs_work *work)
+{
+	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
+	struct scrub_dev *sdev = sbio->sdev;
+	int i;
+
+	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
+	if (sbio->err) {
+		for (i = 0; i < sbio->page_count; i++) {
+			struct scrub_page *spage = sbio->pagev[i];
+
+			spage->io_error = 1;
+			spage->sblock->no_io_error_seen = 0;
+		}
+	}
+
+	/* now complete the scrub_block items that have all pages completed */
+	for (i = 0; i < sbio->page_count; i++) {
+		struct scrub_page *spage = sbio->pagev[i];
+		struct scrub_block *sblock = spage->sblock;
+
+		if (atomic_dec_and_test(&sblock->outstanding_pages))
+			scrub_block_complete(sblock);
+		scrub_block_put(sblock);
+	}
+
+	if (sbio->err) {
+		/* what is this good for??? */
+		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
+		sbio->bio->bi_phys_segments = 0;
+		sbio->bio->bi_idx = 0;
+
+		for (i = 0; i < sbio->page_count; i++) {
+			struct bio_vec *bi;
+			bi = &sbio->bio->bi_io_vec[i];
+			bi->bv_offset = 0;
+			bi->bv_len = PAGE_SIZE;
+		}
+	}
+
+	bio_put(sbio->bio);
+	sbio->bio = NULL;
+	spin_lock(&sdev->list_lock);
+	sbio->next_free = sdev->first_free;
+	sdev->first_free = sbio->index;
+	spin_unlock(&sdev->list_lock);
+	atomic_dec(&sdev->in_flight);
+	wake_up(&sdev->list_wait);
+}
+
+static void scrub_block_complete(struct scrub_block *sblock)
+{
+	if (!sblock->no_io_error_seen)
+		scrub_handle_errored_block(sblock);
+	else
+		scrub_checksum(sblock);
+}
+
 static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
 			   u8 *csum)
 {
@@ -1054,7 +1671,6 @@
 	int ret = 0;
 	unsigned long i;
 	unsigned long num_sectors;
-	u32 sectorsize = sdev->dev->dev_root->sectorsize;
 
 	while (!list_empty(&sdev->csum_list)) {
 		sum = list_first_entry(&sdev->csum_list,
@@ -1072,7 +1688,7 @@
 	if (!sum)
 		return 0;
 
-	num_sectors = sum->len / sectorsize;
+	num_sectors = sum->len / sdev->sectorsize;
 	for (i = 0; i < num_sectors; ++i) {
 		if (sum->sums[i].bytenr == logical) {
 			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
@@ -1093,9 +1709,28 @@
 {
 	int ret;
 	u8 csum[BTRFS_CSUM_SIZE];
+	u32 blocksize;
+
+	if (flags & BTRFS_EXTENT_FLAG_DATA) {
+		blocksize = sdev->sectorsize;
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.data_extents_scrubbed++;
+		sdev->stat.data_bytes_scrubbed += len;
+		spin_unlock(&sdev->stat_lock);
+	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+		BUG_ON(sdev->nodesize != sdev->leafsize);
+		blocksize = sdev->nodesize;
+		spin_lock(&sdev->stat_lock);
+		sdev->stat.tree_extents_scrubbed++;
+		sdev->stat.tree_bytes_scrubbed += len;
+		spin_unlock(&sdev->stat_lock);
+	} else {
+		blocksize = sdev->sectorsize;
+		BUG_ON(1);
+	}
 
 	while (len) {
-		u64 l = min_t(u64, len, PAGE_SIZE);
+		u64 l = min_t(u64, len, blocksize);
 		int have_csum = 0;
 
 		if (flags & BTRFS_EXTENT_FLAG_DATA) {
@@ -1104,8 +1739,8 @@
 			if (have_csum == 0)
 				++sdev->stat.no_csum;
 		}
-		ret = scrub_page(sdev, logical, l, physical, flags, gen,
-				 mirror_num, have_csum ? csum : NULL, 0);
+		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
+				  mirror_num, have_csum ? csum : NULL, 0);
 		if (ret)
 			return ret;
 		len -= l;
@@ -1170,6 +1805,11 @@
 	if (!path)
 		return -ENOMEM;
 
+	/*
+	 * work on commit root. The related disk blocks are static as
+	 * long as COW is applied. This means it is safe to rewrite
+	 * them to repair disk errors without any race conditions
+	 */
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
 
@@ -1516,15 +2156,18 @@
 	struct btrfs_device *device = sdev->dev;
 	struct btrfs_root *root = device->dev_root;
 
+	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+		return -EIO;
+
 	gen = root->fs_info->last_trans_committed;
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
 		bytenr = btrfs_sb_offset(i);
-		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
 			break;
 
-		ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
-				 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
+		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
+				     BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
 		if (ret)
 			return ret;
 	}
@@ -1583,10 +2226,30 @@
 	/*
 	 * check some assumptions
 	 */
-	if (root->sectorsize != PAGE_SIZE ||
-	    root->sectorsize != root->leafsize ||
-	    root->sectorsize != root->nodesize) {
-		printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
+	if (root->nodesize != root->leafsize) {
+		printk(KERN_ERR
+		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
+		       root->nodesize, root->leafsize);
+		return -EINVAL;
+	}
+
+	if (root->nodesize > BTRFS_STRIPE_LEN) {
+		/*
+		 * in this case scrub is unable to calculate the checksum
+		 * the way scrub is implemented. Do not handle this
+		 * situation at all because it won't ever happen.
+		 */
+		printk(KERN_ERR
+		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
+		       root->nodesize, BTRFS_STRIPE_LEN);
+		return -EINVAL;
+	}
+
+	if (root->sectorsize != PAGE_SIZE) {
+		/* not supported for data w/o checksums */
+		printk(KERN_ERR
+		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
+		       root->sectorsize, (unsigned long long)PAGE_SIZE);
 		return -EINVAL;
 	}
 
@@ -1656,7 +2319,7 @@
 	return ret;
 }
 
-int btrfs_scrub_pause(struct btrfs_root *root)
+void btrfs_scrub_pause(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
@@ -1671,34 +2334,28 @@
 		mutex_lock(&fs_info->scrub_lock);
 	}
 	mutex_unlock(&fs_info->scrub_lock);
-
-	return 0;
 }
 
-int btrfs_scrub_continue(struct btrfs_root *root)
+void btrfs_scrub_continue(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	atomic_dec(&fs_info->scrub_pause_req);
 	wake_up(&fs_info->scrub_pause_wait);
-	return 0;
 }
 
-int btrfs_scrub_pause_super(struct btrfs_root *root)
+void btrfs_scrub_pause_super(struct btrfs_root *root)
 {
 	down_write(&root->fs_info->scrub_super_lock);
-	return 0;
 }
 
-int btrfs_scrub_continue_super(struct btrfs_root *root)
+void btrfs_scrub_continue_super(struct btrfs_root *root)
 {
 	up_write(&root->fs_info->scrub_super_lock);
-	return 0;
 }
 
-int btrfs_scrub_cancel(struct btrfs_root *root)
+int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	mutex_lock(&fs_info->scrub_lock);
 	if (!atomic_read(&fs_info->scrubs_running)) {
@@ -1719,6 +2376,11 @@
 	return 0;
 }
 
+int btrfs_scrub_cancel(struct btrfs_root *root)
+{
+	return __btrfs_scrub_cancel(root->fs_info);
+}
+
 int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1741,6 +2403,7 @@
 
 	return 0;
 }
+
 int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
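The scrub rework above ties the lifetime of a scrub_block and its pages to a reference count: scrub_pages() creates the block holding one reference for itself, scrub_add_page_to_bio() takes another for every page it queues in a bio, and scrub_bio_end_io_worker() drops those per-page references again, so whichever path drops the last one frees the block in scrub_block_put(). A minimal user-space sketch of that pairing (plain ints instead of atomic_t and a made-up page count; illustrative only, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct block {
		int ref_count;			/* atomic_t in the kernel */
		int page_count;
	};

	static void block_get(struct block *b)
	{
		b->ref_count++;
	}

	static void block_put(struct block *b)
	{
		if (--b->ref_count == 0) {	/* last reference frees, as in scrub_block_put() */
			printf("freeing block\n");
			free(b);
		}
	}

	int main(void)
	{
		struct block *b = calloc(1, sizeof(*b));

		if (!b)
			return 1;
		b->ref_count = 1;		/* creator's reference (scrub_pages) */
		b->page_count = 4;

		for (int i = 0; i < b->page_count; i++)
			block_get(b);		/* one reference per page handed to a bio */

		block_put(b);			/* creator drops its reference */

		for (int i = 0; i < 4; i++)
			block_put(b);		/* completion worker; the last put frees */

		return 0;
	}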
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index bc1f6ad..c6ffa58 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -44,8 +44,9 @@
 #define BTRFS_SETGET_FUNCS(name, type, member, bits)			\
 u##bits btrfs_##name(struct extent_buffer *eb, type *s);		\
 void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);	\
-u##bits btrfs_##name(struct extent_buffer *eb,				\
-				   type *s)				\
+void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token);	\
+u##bits btrfs_token_##name(struct extent_buffer *eb,				\
+			   type *s, struct btrfs_map_token *token)	\
 {									\
 	unsigned long part_offset = (unsigned long)s;			\
 	unsigned long offset = part_offset + offsetof(type, member);	\
@@ -54,9 +55,18 @@
 	char *kaddr;						\
 	unsigned long map_start;				\
 	unsigned long map_len;					\
+	unsigned long mem_len = sizeof(((type *)0)->member);	\
 	u##bits res;						\
+	if (token && token->kaddr && token->offset <= offset &&	\
+	    token->eb == eb &&					\
+	   (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
+		kaddr = token->kaddr;				\
+		p = (type *)(kaddr + part_offset - token->offset);  \
+		res = le##bits##_to_cpu(p->member);		\
+		return res;					\
+	}							\
 	err = map_private_extent_buffer(eb, offset,		\
-			sizeof(((type *)0)->member),		\
+			mem_len,				\
 			&kaddr, &map_start, &map_len);		\
 	if (err) {						\
 		__le##bits leres;				\
@@ -65,10 +75,15 @@
 	}							\
 	p = (type *)(kaddr + part_offset - map_start);		\
 	res = le##bits##_to_cpu(p->member);			\
+	if (token) {						\
+		token->kaddr = kaddr;				\
+		token->offset = map_start;			\
+		token->eb = eb;					\
+	}							\
 	return res;						\
 }									\
-void btrfs_set_##name(struct extent_buffer *eb,				\
-				    type *s, u##bits val)		\
+void btrfs_set_token_##name(struct extent_buffer *eb,				\
+			    type *s, u##bits val, struct btrfs_map_token *token)		\
 {									\
 	unsigned long part_offset = (unsigned long)s;			\
 	unsigned long offset = part_offset + offsetof(type, member);	\
@@ -77,8 +92,17 @@
 	char *kaddr;						\
 	unsigned long map_start;				\
 	unsigned long map_len;					\
+	unsigned long mem_len = sizeof(((type *)0)->member);	\
+	if (token && token->kaddr && token->offset <= offset &&	\
+	    token->eb == eb &&					\
+	   (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
+		kaddr = token->kaddr;				\
+		p = (type *)(kaddr + part_offset - token->offset);  \
+		p->member = cpu_to_le##bits(val);		\
+		return;						\
+	}							\
 	err = map_private_extent_buffer(eb, offset,		\
-			sizeof(((type *)0)->member),		\
+			mem_len,				\
 			&kaddr, &map_start, &map_len);		\
 	if (err) {						\
 		__le##bits val2;				\
@@ -88,7 +112,22 @@
 	}							\
 	p = (type *)(kaddr + part_offset - map_start);		\
 	p->member = cpu_to_le##bits(val);			\
-}
+	if (token) {						\
+		token->kaddr = kaddr;				\
+		token->offset = map_start;			\
+		token->eb = eb;					\
+	}							\
+}								\
+void btrfs_set_##name(struct extent_buffer *eb,			\
+		      type *s, u##bits val)			\
+{								\
+	btrfs_set_token_##name(eb, s, val, NULL);		\
+}								\
+u##bits btrfs_##name(struct extent_buffer *eb,			\
+		      type *s)					\
+{								\
+	return btrfs_token_##name(eb, s, NULL);			\
+}								\
 
 #include "ctree.h"
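The struct-funcs.c change above makes each generated getter and setter take an optional btrfs_map_token, so a run of accesses to fields that sit on the same extent-buffer page can reuse one mapping instead of calling map_private_extent_buffer() every time; the plain btrfs_<name>()/btrfs_set_<name>() entry points simply pass a NULL token and behave as before. A minimal usage sketch, assuming a field declared elsewhere via BTRFS_SETGET_FUNCS(foo_bar, struct btrfs_foo, bar, 64) (the foo_bar field and struct btrfs_foo are hypothetical; the kaddr/offset/eb members are the ones the macro itself tests):

	static u64 bump_bar(struct extent_buffer *eb, struct btrfs_foo *s)
	{
		struct btrfs_map_token token;
		u64 v;

		/* token.kaddr == NULL, so the first call maps the page and caches it */
		memset(&token, 0, sizeof(token));

		v = btrfs_token_foo_bar(eb, s, &token);		/* maps once, records kaddr/offset/eb */
		btrfs_set_token_foo_bar(eb, s, v + 1, &token);	/* reuses the cached mapping */

		return v;
	}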
 
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 81df3fe..8d5d380 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -76,6 +76,9 @@
 	case -EROFS:
 		errstr = "Readonly filesystem";
 		break;
+	case -EEXIST:
+		errstr = "Object already exists";
+		break;
 	default:
 		if (nbuf) {
 			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
@@ -116,6 +119,8 @@
 	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
 		sb->s_flags |= MS_RDONLY;
 		printk(KERN_INFO "btrfs is forced readonly\n");
+		__btrfs_scrub_cancel(fs_info);
+//		WARN_ON(1);
 	}
 }
 
@@ -124,25 +129,132 @@
  * invokes the appropriate error response.
  */
 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
-		     unsigned int line, int errno)
+		       unsigned int line, int errno, const char *fmt, ...)
 {
 	struct super_block *sb = fs_info->sb;
 	char nbuf[16];
 	const char *errstr;
+	va_list args;
+	va_start(args, fmt);
 
 	/*
 	 * Special case: if the error is EROFS, and we're already
 	 * under MS_RDONLY, then it is safe here.
 	 */
 	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
+		return;
+
+	errstr = btrfs_decode_error(fs_info, errno, nbuf);
+	if (fmt) {
+		struct va_format vaf = {
+			.fmt = fmt,
+			.va = &args,
+		};
+
+		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
+			sb->s_id, function, line, errstr, &vaf);
+	} else {
+		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
+			sb->s_id, function, line, errstr);
+	}
+
+	/* Don't go through full error handling during mount */
+	if (sb->s_flags & MS_BORN) {
+		save_error_info(fs_info);
+		btrfs_handle_error(fs_info);
+	}
+	va_end(args);
+}
+
+const char *logtypes[] = {
+	"emergency",
+	"alert",
+	"critical",
+	"error",
+	"warning",
+	"notice",
+	"info",
+	"debug",
+};
+
+void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+	struct super_block *sb = fs_info->sb;
+	char lvl[4];
+	struct va_format vaf;
+	va_list args;
+	const char *type = logtypes[4];
+
+	va_start(args, fmt);
+
+	if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
+		strncpy(lvl, fmt, 3);
+		lvl[3] = '\0';
+		type = logtypes[fmt[1] - '0'];
+		fmt += 3;
+	} else
+		*lvl = '\0';
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf);
+}
+
+/*
+ * We only mark the transaction aborted and then set the file system read-only.
+ * This will prevent new transactions from starting or trying to join this
+ * one.
+ *
+ * This means that error recovery at the call site is limited to freeing
+ * any local memory allocations and passing the error code up without
+ * further cleanup. The transaction should complete as it normally would
+ * in the call path but will return -EIO.
+ *
+ * We'll complete the cleanup in btrfs_end_transaction and
+ * btrfs_commit_transaction.
+ */
+void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root, const char *function,
+			       unsigned int line, int errno)
+{
+	WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted");
+	trans->aborted = errno;
+	/* Nothing used. The other threads that have joined this
+	 * transaction may be able to continue. */
+	if (!trans->blocks_used) {
+		btrfs_printk(root->fs_info, "Aborting unused transaction.\n");
 		return;
+	}
+	trans->transaction->aborted = errno;
+	__btrfs_std_error(root->fs_info, function, line, errno, NULL);
+}
+/*
+ * __btrfs_panic decodes unexpected, fatal errors from the caller,
+ * issues an alert, and either panics or BUGs, depending on mount options.
+ */
+void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+		   unsigned int line, int errno, const char *fmt, ...)
+{
+	char nbuf[16];
+	char *s_id = "<unknown>";
+	const char *errstr;
+	struct va_format vaf = { .fmt = fmt };
+	va_list args;
+
+	if (fs_info)
+		s_id = fs_info->sb->s_id;
+
+	va_start(args, fmt);
+	vaf.va = &args;
 
 	errstr = btrfs_decode_error(fs_info, errno, nbuf);
-	printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
-		sb->s_id, function, line, errstr);
-	save_error_info(fs_info);
+	if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
+		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
+			s_id, function, line, &vaf, errstr);
 
-	btrfs_handle_error(fs_info);
+	printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
+	       s_id, function, line, &vaf, errstr);
+	va_end(args);
+	/* Caller calls BUG() */
 }
 
 static void btrfs_put_super(struct super_block *sb)
@@ -166,7 +278,7 @@
 	Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
 	Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
 	Opt_check_integrity, Opt_check_integrity_including_extent_data,
-	Opt_check_integrity_print_mask,
+	Opt_check_integrity_print_mask, Opt_fatal_errors,
 	Opt_err,
 };
 
@@ -206,12 +318,14 @@
 	{Opt_check_integrity, "check_int"},
 	{Opt_check_integrity_including_extent_data, "check_int_data"},
 	{Opt_check_integrity_print_mask, "check_int_print_mask=%d"},
+	{Opt_fatal_errors, "fatal_errors=%s"},
 	{Opt_err, NULL},
 };
 
 /*
  * Regular mount options parser.  Everything that is needed only when
  * reading in a new superblock is parsed here.
+ * XXX JDM: This needs to be cleaned up for remount.
  */
 int btrfs_parse_options(struct btrfs_root *root, char *options)
 {
@@ -438,6 +552,18 @@
 			ret = -EINVAL;
 			goto out;
 #endif
+		case Opt_fatal_errors:
+			if (strcmp(args[0].from, "panic") == 0)
+				btrfs_set_opt(info->mount_opt,
+					      PANIC_ON_FATAL_ERROR);
+			else if (strcmp(args[0].from, "bug") == 0)
+				btrfs_clear_opt(info->mount_opt,
+					      PANIC_ON_FATAL_ERROR);
+			else {
+				ret = -EINVAL;
+				goto out;
+			}
+			break;
 		case Opt_err:
 			printk(KERN_INFO "btrfs: unrecognized mount option "
 			       "'%s'\n", p);
@@ -762,6 +888,8 @@
 		seq_puts(seq, ",inode_cache");
 	if (btrfs_test_opt(root, SKIP_BALANCE))
 		seq_puts(seq, ",skip_balance");
+	if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR))
+		seq_puts(seq, ",fatal_errors=panic");
 	return 0;
 }
 
@@ -995,11 +1123,20 @@
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
 	struct btrfs_root *root = fs_info->tree_root;
+	unsigned old_flags = sb->s_flags;
+	unsigned long old_opts = fs_info->mount_opt;
+	unsigned long old_compress_type = fs_info->compress_type;
+	u64 old_max_inline = fs_info->max_inline;
+	u64 old_alloc_start = fs_info->alloc_start;
+	int old_thread_pool_size = fs_info->thread_pool_size;
+	unsigned int old_metadata_ratio = fs_info->metadata_ratio;
 	int ret;
 
 	ret = btrfs_parse_options(root, data);
-	if (ret)
-		return -EINVAL;
+	if (ret) {
+		ret = -EINVAL;
+		goto restore;
+	}
 
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
@@ -1007,26 +1144,44 @@
 	if (*flags & MS_RDONLY) {
 		sb->s_flags |= MS_RDONLY;
 
-		ret =  btrfs_commit_super(root);
-		WARN_ON(ret);
+		ret = btrfs_commit_super(root);
+		if (ret)
+			goto restore;
 	} else {
-		if (fs_info->fs_devices->rw_devices == 0)
-			return -EACCES;
+		if (fs_info->fs_devices->rw_devices == 0) {
+			ret = -EACCES;
+			goto restore;
+		}
 
-		if (btrfs_super_log_root(fs_info->super_copy) != 0)
-			return -EINVAL;
+		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
+			ret = -EINVAL;
+			goto restore;
+		}
 
 		ret = btrfs_cleanup_fs_roots(fs_info);
-		WARN_ON(ret);
+		if (ret)
+			goto restore;
 
 		/* recover relocation */
 		ret = btrfs_recover_relocation(root);
-		WARN_ON(ret);
+		if (ret)
+			goto restore;
 
 		sb->s_flags &= ~MS_RDONLY;
 	}
 
 	return 0;
+
+restore:
+	/* We've hit an error - don't reset MS_RDONLY */
+	if (sb->s_flags & MS_RDONLY)
+		old_flags |= MS_RDONLY;
+	sb->s_flags = old_flags;
+	fs_info->mount_opt = old_opts;
+	fs_info->compress_type = old_compress_type;
+	fs_info->max_inline = old_max_inline;
+	fs_info->alloc_start = old_alloc_start;
+	fs_info->thread_pool_size = old_thread_pool_size;
+	fs_info->metadata_ratio = old_metadata_ratio;
+	return ret;
 }
 
 /* Used to sort the devices by max_avail(descending sort) */
@@ -1356,9 +1511,7 @@
 	if (err)
 		return err;
 
-	err = btrfs_init_compress();
-	if (err)
-		goto free_sysfs;
+	btrfs_init_compress();
 
 	err = btrfs_init_cachep();
 	if (err)
@@ -1384,6 +1537,8 @@
 	if (err)
 		goto unregister_ioctl;
 
+	btrfs_init_lockdep();
+
 	printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
 	return 0;
 
@@ -1399,7 +1554,6 @@
 	btrfs_destroy_cachep();
 free_compress:
 	btrfs_exit_compress();
-free_sysfs:
 	btrfs_exit_sysfs();
 	return err;
 }
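The __btrfs_abort_transaction() and __btrfs_std_error() helpers introduced above exist so that callers can stop open-coding BUG_ON(ret) and instead mark the transaction dead, force the filesystem read-only and unwind, which is the conversion the transaction.c, tree-log.c and volumes.c hunks below perform. A sketch of the resulting call-site pattern (the step being checked is a placeholder; btrfs_abort_transaction() is assumed to be the usual ctree.h macro that feeds __func__ and __LINE__ into __btrfs_abort_transaction()):

	ret = some_update_step(trans, root);	/* any step that previously ended in BUG_ON(ret) */
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);	/* record errno, flag the fs */
		goto out;					/* unwind locally, hand the error up */
	}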
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 04b77e3..8da29e8 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -31,7 +31,7 @@
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
-static noinline void put_transaction(struct btrfs_transaction *transaction)
+void put_transaction(struct btrfs_transaction *transaction)
 {
 	WARN_ON(atomic_read(&transaction->use_count) == 0);
 	if (atomic_dec_and_test(&transaction->use_count)) {
@@ -58,6 +58,12 @@
 
 	spin_lock(&root->fs_info->trans_lock);
 loop:
+	/* The file system has been taken offline. No new transactions. */
+	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		spin_unlock(&root->fs_info->trans_lock);
+		return -EROFS;
+	}
+
 	if (root->fs_info->trans_no_join) {
 		if (!nofail) {
 			spin_unlock(&root->fs_info->trans_lock);
@@ -67,6 +73,8 @@
 
 	cur_trans = root->fs_info->running_transaction;
 	if (cur_trans) {
+		if (cur_trans->aborted) {
+			spin_unlock(&root->fs_info->trans_lock);
+			return cur_trans->aborted;
+		}
 		atomic_inc(&cur_trans->use_count);
 		atomic_inc(&cur_trans->num_writers);
 		cur_trans->num_joined++;
@@ -123,6 +131,7 @@
 	root->fs_info->generation++;
 	cur_trans->transid = root->fs_info->generation;
 	root->fs_info->running_transaction = cur_trans;
+	cur_trans->aborted = 0;
 	spin_unlock(&root->fs_info->trans_lock);
 
 	return 0;
@@ -318,6 +327,7 @@
 	h->use_count = 1;
 	h->block_rsv = NULL;
 	h->orig_rsv = NULL;
+	h->aborted = 0;
 
 	smp_mb();
 	if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -327,8 +337,7 @@
 
 	if (num_bytes) {
 		trace_btrfs_space_reservation(root->fs_info, "transaction",
-					      (u64)(unsigned long)h,
-					      num_bytes, 1);
+					      h->transid, num_bytes, 1);
 		h->block_rsv = &root->fs_info->trans_block_rsv;
 		h->bytes_reserved = num_bytes;
 	}
@@ -440,6 +449,7 @@
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_block_rsv *rsv = trans->block_rsv;
 	int updates;
+	int err;
 
 	smp_mb();
 	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
@@ -453,8 +463,11 @@
 
 	updates = trans->delayed_ref_updates;
 	trans->delayed_ref_updates = 0;
-	if (updates)
-		btrfs_run_delayed_refs(trans, root, updates);
+	if (updates) {
+		err = btrfs_run_delayed_refs(trans, root, updates);
+		if (err) /* An error code also evaluates as true */
+			return err;
+	}
 
 	trans->block_rsv = rsv;
 
@@ -525,6 +538,11 @@
 	if (throttle)
 		btrfs_run_delayed_iputs(root);
 
+	if (trans->aborted ||
+	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		return -EIO;
+	}
+
 	return 0;
 }
 
@@ -690,11 +708,13 @@
 		ret = btrfs_update_root(trans, tree_root,
 					&root->root_key,
 					&root->root_item);
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 
 		old_root_used = btrfs_root_used(&root->root_item);
 		ret = btrfs_write_dirty_block_groups(trans, root);
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 	}
 
 	if (root != root->fs_info->extent_root)
@@ -705,6 +725,10 @@
 
 /*
  * update all the cowonly tree roots on disk
+ *
+ * The error handling in this function may not be obvious. Any of the
+ * failures will cause the file system to go offline. We still need
+ * to clean up the delayed refs.
  */
 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 					 struct btrfs_root *root)
@@ -715,22 +739,30 @@
 	int ret;
 
 	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
 	eb = btrfs_lock_root_node(fs_info->tree_root);
-	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
+	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
+			      0, &eb);
 	btrfs_tree_unlock(eb);
 	free_extent_buffer(eb);
 
+	if (ret)
+		return ret;
+
 	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
 	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
 		next = fs_info->dirty_cowonly_roots.next;
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
 
-		update_cowonly_root(trans, root);
+		ret = update_cowonly_root(trans, root);
+		if (ret)
+			return ret;
 	}
 
 	down_write(&fs_info->extent_commit_sem);
@@ -874,7 +906,7 @@
 
 	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
 	if (!new_root_item) {
-		pending->error = -ENOMEM;
+		ret = pending->error = -ENOMEM;
 		goto fail;
 	}
 
@@ -911,21 +943,24 @@
 	 * insert the directory item
 	 */
 	ret = btrfs_set_inode_index(parent_inode, &index);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 	ret = btrfs_insert_dir_item(trans, parent_root,
 				dentry->d_name.name, dentry->d_name.len,
 				parent_inode, &key,
 				BTRFS_FT_DIR, index);
-	if (ret) {
+	if (ret == -EEXIST) {
 		pending->error = -EEXIST;
 		dput(parent);
 		goto fail;
+	} else if (ret) {
+		goto abort_trans_dput;
 	}
 
 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
 					 dentry->d_name.len * 2);
 	ret = btrfs_update_inode(trans, parent_root, parent_inode);
-	BUG_ON(ret);
+	if (ret)
+		goto abort_trans_dput;
 
 	/*
 	 * pull in the delayed directory update
@@ -934,7 +969,10 @@
 	 * snapshot
 	 */
 	ret = btrfs_run_delayed_items(trans, root);
-	BUG_ON(ret);
+	if (ret) { /* Transaction aborted */
+		dput(parent);
+		goto fail;
+	}
 
 	record_root_in_trans(trans, root);
 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
@@ -949,12 +987,21 @@
 	btrfs_set_root_flags(new_root_item, root_flags);
 
 	old = btrfs_lock_root_node(root);
-	btrfs_cow_block(trans, root, old, NULL, 0, &old);
+	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
+	if (ret) {
+		btrfs_tree_unlock(old);
+		free_extent_buffer(old);
+		goto abort_trans_dput;
+	}
+
 	btrfs_set_lock_blocking(old);
 
-	btrfs_copy_root(trans, root, old, &tmp, objectid);
+	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
+	/* clean up in any case */
 	btrfs_tree_unlock(old);
 	free_extent_buffer(old);
+	if (ret)
+		goto abort_trans_dput;
 
 	/* see comments in should_cow_block() */
 	root->force_cow = 1;
@@ -966,7 +1013,8 @@
 	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
 	btrfs_tree_unlock(tmp);
 	free_extent_buffer(tmp);
-	BUG_ON(ret);
+	if (ret)
+		goto abort_trans_dput;
 
 	/*
 	 * insert root back/forward references
@@ -975,19 +1023,32 @@
 				 parent_root->root_key.objectid,
 				 btrfs_ino(parent_inode), index,
 				 dentry->d_name.name, dentry->d_name.len);
-	BUG_ON(ret);
 	dput(parent);
+	if (ret)
+		goto fail;
 
 	key.offset = (u64)-1;
 	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
-	BUG_ON(IS_ERR(pending->snap));
+	if (IS_ERR(pending->snap)) {
+		ret = PTR_ERR(pending->snap);
+		goto abort_trans;
+	}
 
-	btrfs_reloc_post_snapshot(trans, pending);
+	ret = btrfs_reloc_post_snapshot(trans, pending);
+	if (ret)
+		goto abort_trans;
+	ret = 0;
 fail:
 	kfree(new_root_item);
 	trans->block_rsv = rsv;
 	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
-	return 0;
+	return ret;
+
+abort_trans_dput:
+	dput(parent);
+abort_trans:
+	btrfs_abort_transaction(trans, root, ret);
+	goto fail;
 }
 
 /*
@@ -1124,6 +1185,33 @@
 	return 0;
 }
 
+
+static void cleanup_transaction(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root)
+{
+	struct btrfs_transaction *cur_trans = trans->transaction;
+
+	WARN_ON(trans->use_count > 1);
+
+	spin_lock(&root->fs_info->trans_lock);
+	list_del_init(&cur_trans->list);
+	spin_unlock(&root->fs_info->trans_lock);
+
+	btrfs_cleanup_one_transaction(trans->transaction, root);
+
+	put_transaction(cur_trans);
+	put_transaction(cur_trans);
+
+	trace_btrfs_transaction_commit(root);
+
+	btrfs_scrub_continue(root);
+
+	if (current->journal_info == trans)
+		current->journal_info = NULL;
+
+	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+}
+
 /*
  * btrfs_transaction state sequence:
  *    in_commit = 0, blocked = 0  (initial)
@@ -1135,10 +1223,10 @@
 			     struct btrfs_root *root)
 {
 	unsigned long joined = 0;
-	struct btrfs_transaction *cur_trans;
+	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_transaction *prev_trans = NULL;
 	DEFINE_WAIT(wait);
-	int ret;
+	int ret = -EIO;
 	int should_grow = 0;
 	unsigned long now = get_seconds();
 	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
@@ -1148,13 +1236,18 @@
 	btrfs_trans_release_metadata(trans, root);
 	trans->block_rsv = NULL;
 
+	if (cur_trans->aborted)
+		goto cleanup_transaction;
+
 	/* make a pass through all the delayed refs we have so far
 	 * any runnings procs may add more while we are here
 	 */
 	ret = btrfs_run_delayed_refs(trans, root, 0);
-	BUG_ON(ret);
+	if (ret)
+		goto cleanup_transaction;
 
 	cur_trans = trans->transaction;
+
 	/*
 	 * set the flushing flag so procs in this transaction have to
 	 * start sending their work down.
@@ -1162,19 +1255,20 @@
 	cur_trans->delayed_refs.flushing = 1;
 
 	ret = btrfs_run_delayed_refs(trans, root, 0);
-	BUG_ON(ret);
+	if (ret)
+		goto cleanup_transaction;
 
 	spin_lock(&cur_trans->commit_lock);
 	if (cur_trans->in_commit) {
 		spin_unlock(&cur_trans->commit_lock);
 		atomic_inc(&cur_trans->use_count);
-		btrfs_end_transaction(trans, root);
+		ret = btrfs_end_transaction(trans, root);
 
 		wait_for_commit(root, cur_trans);
 
 		put_transaction(cur_trans);
 
-		return 0;
+		return ret;
 	}
 
 	trans->transaction->in_commit = 1;
@@ -1214,12 +1308,12 @@
 
 		if (flush_on_commit || snap_pending) {
 			btrfs_start_delalloc_inodes(root, 1);
-			ret = btrfs_wait_ordered_extents(root, 0, 1);
-			BUG_ON(ret);
+			btrfs_wait_ordered_extents(root, 0, 1);
 		}
 
 		ret = btrfs_run_delayed_items(trans, root);
-		BUG_ON(ret);
+		if (ret)
+			goto cleanup_transaction;
 
 		/*
 		 * rename don't use btrfs_join_transaction, so, once we
@@ -1261,13 +1355,22 @@
 	mutex_lock(&root->fs_info->reloc_mutex);
 
 	ret = btrfs_run_delayed_items(trans, root);
-	BUG_ON(ret);
+	if (ret) {
+		mutex_unlock(&root->fs_info->reloc_mutex);
+		goto cleanup_transaction;
+	}
 
 	ret = create_pending_snapshots(trans, root->fs_info);
-	BUG_ON(ret);
+	if (ret) {
+		mutex_unlock(&root->fs_info->reloc_mutex);
+		goto cleanup_transaction;
+	}
 
 	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-	BUG_ON(ret);
+	if (ret) {
+		mutex_unlock(&root->fs_info->reloc_mutex);
+		goto cleanup_transaction;
+	}
 
 	/*
 	 * make sure none of the code above managed to slip in a
@@ -1294,7 +1397,10 @@
 	mutex_lock(&root->fs_info->tree_log_mutex);
 
 	ret = commit_fs_roots(trans, root);
-	BUG_ON(ret);
+	if (ret) {
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		goto cleanup_transaction;
+	}
 
 	/* commit_fs_roots gets rid of all the tree log roots, it is now
 	 * safe to free the root of tree log roots
@@ -1302,7 +1408,10 @@
 	btrfs_free_log_root_tree(trans, root->fs_info);
 
 	ret = commit_cowonly_roots(trans, root);
-	BUG_ON(ret);
+	if (ret) {
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		goto cleanup_transaction;
+	}
 
 	btrfs_prepare_extent_commit(trans, root);
 
@@ -1336,8 +1445,18 @@
 	wake_up(&root->fs_info->transaction_wait);
 
 	ret = btrfs_write_and_wait_transaction(trans, root);
-	BUG_ON(ret);
-	write_ctree_super(trans, root, 0);
+	if (ret) {
+		btrfs_error(root->fs_info, ret,
+			    "Error while writing out transaction.");
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		goto cleanup_transaction;
+	}
+
+	ret = write_ctree_super(trans, root, 0);
+	if (ret) {
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		goto cleanup_transaction;
+	}
 
 	/*
 	 * the super is written, we can safely allow the tree-loggers
@@ -1373,6 +1492,15 @@
 		btrfs_run_delayed_iputs(root);
 
 	return ret;
+
+cleanup_transaction:
+	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
+//	WARN_ON(1);
+	if (current->journal_info == trans)
+		current->journal_info = NULL;
+	cleanup_transaction(trans, root);
+
+	return ret;
 }
 
 /*
@@ -1388,6 +1516,8 @@
 	spin_unlock(&fs_info->trans_lock);
 
 	while (!list_empty(&list)) {
+		int ret;
+
 		root = list_entry(list.next, struct btrfs_root, root_list);
 		list_del(&root->root_list);
 
@@ -1395,9 +1525,10 @@
 
 		if (btrfs_header_backref_rev(root->node) <
 		    BTRFS_MIXED_BACKREF_REV)
-			btrfs_drop_snapshot(root, NULL, 0, 0);
+			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 		else
-			btrfs_drop_snapshot(root, NULL, 1, 0);
+			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
+		BUG_ON(ret < 0);
 	}
 	return 0;
 }
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 02564e6..fe27379 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -43,6 +43,7 @@
 	wait_queue_head_t commit_wait;
 	struct list_head pending_snapshots;
 	struct btrfs_delayed_ref_root delayed_refs;
+	int aborted;
 };
 
 struct btrfs_trans_handle {
@@ -55,6 +56,7 @@
 	struct btrfs_transaction *transaction;
 	struct btrfs_block_rsv *block_rsv;
 	struct btrfs_block_rsv *orig_rsv;
+	int aborted;
 };
 
 struct btrfs_pending_snapshot {
@@ -114,4 +116,5 @@
 				struct extent_io_tree *dirty_pages, int mark);
 int btrfs_transaction_blocked(struct btrfs_fs_info *info);
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
+void put_transaction(struct btrfs_transaction *transaction);
 #endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 966cc74..d017283 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -212,14 +212,13 @@
  * indicate we're done making changes to the log tree
  * and wake up anyone waiting to do a sync
  */
-int btrfs_end_log_trans(struct btrfs_root *root)
+void btrfs_end_log_trans(struct btrfs_root *root)
 {
 	if (atomic_dec_and_test(&root->log_writers)) {
 		smp_mb();
 		if (waitqueue_active(&root->log_writer_wait))
 			wake_up(&root->log_writer_wait);
 	}
-	return 0;
 }
 
 
@@ -378,12 +377,11 @@
 		u32 found_size;
 		found_size = btrfs_item_size_nr(path->nodes[0],
 						path->slots[0]);
-		if (found_size > item_size) {
+		if (found_size > item_size)
 			btrfs_truncate_item(trans, root, path, item_size, 1);
-		} else if (found_size < item_size) {
-			ret = btrfs_extend_item(trans, root, path,
-						item_size - found_size);
-		}
+		else if (found_size < item_size)
+			btrfs_extend_item(trans, root, path,
+					  item_size - found_size);
 	} else if (ret) {
 		return ret;
 	}
@@ -1763,7 +1761,7 @@
 					BTRFS_TREE_LOG_OBJECTID);
 				ret = btrfs_free_and_pin_reserved_extent(root,
 							 bytenr, blocksize);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM or logic errors */
 			}
 			free_extent_buffer(next);
 			continue;
@@ -1871,20 +1869,26 @@
 		wret = walk_down_log_tree(trans, log, path, &level, wc);
 		if (wret > 0)
 			break;
-		if (wret < 0)
+		if (wret < 0) {
 			ret = wret;
+			goto out;
+		}
 
 		wret = walk_up_log_tree(trans, log, path, &level, wc);
 		if (wret > 0)
 			break;
-		if (wret < 0)
+		if (wret < 0) {
 			ret = wret;
+			goto out;
+		}
 	}
 
 	/* was the root node processed? if not, catch it here */
 	if (path->nodes[orig_level]) {
-		wc->process_func(log, path->nodes[orig_level], wc,
+		ret = wc->process_func(log, path->nodes[orig_level], wc,
 			 btrfs_header_generation(path->nodes[orig_level]));
+		if (ret)
+			goto out;
 		if (wc->free) {
 			struct extent_buffer *next;
 
@@ -1900,10 +1904,11 @@
 				BTRFS_TREE_LOG_OBJECTID);
 			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
 							 next->len);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM or logic errors */
 		}
 	}
 
+out:
 	for (i = 0; i <= orig_level; i++) {
 		if (path->nodes[i]) {
 			free_extent_buffer(path->nodes[i]);
@@ -1963,8 +1968,8 @@
 	return 0;
 }
 
-static int wait_for_writer(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root)
+static void wait_for_writer(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root)
 {
 	DEFINE_WAIT(wait);
 	while (root->fs_info->last_trans_log_full_commit !=
@@ -1978,7 +1983,6 @@
 		mutex_lock(&root->log_mutex);
 		finish_wait(&root->log_writer_wait, &wait);
 	}
-	return 0;
 }
 
 /*
@@ -2046,7 +2050,11 @@
 	 * wait for them until later.
 	 */
 	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		mutex_unlock(&root->log_mutex);
+		goto out;
+	}
 
 	btrfs_set_root_node(&log->root_item, log->node);
 
@@ -2077,7 +2085,11 @@
 	}
 
 	if (ret) {
-		BUG_ON(ret != -ENOSPC);
+		if (ret != -ENOSPC) {
+			btrfs_abort_transaction(trans, root, ret);
+			mutex_unlock(&log_root_tree->log_mutex);
+			goto out;
+		}
 		root->fs_info->last_trans_log_full_commit = trans->transid;
 		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
 		mutex_unlock(&log_root_tree->log_mutex);
@@ -2117,7 +2129,11 @@
 	ret = btrfs_write_and_wait_marked_extents(log_root_tree,
 				&log_root_tree->dirty_log_pages,
 				EXTENT_DIRTY | EXTENT_NEW);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		mutex_unlock(&log_root_tree->log_mutex);
+		goto out_wake_log_root;
+	}
 	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
 
 	btrfs_set_super_log_root(root->fs_info->super_for_commit,
@@ -2326,7 +2342,9 @@
 	if (ret == -ENOSPC) {
 		root->fs_info->last_trans_log_full_commit = trans->transid;
 		ret = 0;
-	}
+	} else if (ret < 0)
+		btrfs_abort_transaction(trans, root, ret);
+
 	btrfs_end_log_trans(root);
 
 	return err;
@@ -2357,7 +2375,8 @@
 	if (ret == -ENOSPC) {
 		root->fs_info->last_trans_log_full_commit = trans->transid;
 		ret = 0;
-	}
+	} else if (ret < 0 && ret != -ENOENT)
+		btrfs_abort_transaction(trans, root, ret);
 	btrfs_end_log_trans(root);
 
 	return ret;
@@ -3169,13 +3188,20 @@
 	fs_info->log_root_recovering = 1;
 
 	trans = btrfs_start_transaction(fs_info->tree_root, 0);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto error;
+	}
 
 	wc.trans = trans;
 	wc.pin = 1;
 
 	ret = walk_log_tree(trans, log_root_tree, &wc);
-	BUG_ON(ret);
+	if (ret) {
+		btrfs_error(fs_info, ret, "Failed to pin buffers while "
+			    "recovering log root tree.");
+		goto error;
+	}
 
 again:
 	key.objectid = BTRFS_TREE_LOG_OBJECTID;
@@ -3184,8 +3210,12 @@
 
 	while (1) {
 		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
-		if (ret < 0)
-			break;
+
+		if (ret < 0) {
+			btrfs_error(fs_info, ret,
+				    "Couldn't find tree log root.");
+			goto error;
+		}
 		if (ret > 0) {
 			if (path->slots[0] == 0)
 				break;
@@ -3199,14 +3229,24 @@
 
 		log = btrfs_read_fs_root_no_radix(log_root_tree,
 						  &found_key);
-		BUG_ON(IS_ERR(log));
+		if (IS_ERR(log)) {
+			ret = PTR_ERR(log);
+			btrfs_error(fs_info, ret,
+				    "Couldn't read tree log root.");
+			goto error;
+		}
 
 		tmp_key.objectid = found_key.offset;
 		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
 		tmp_key.offset = (u64)-1;
 
 		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
-		BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
+		if (IS_ERR(wc.replay_dest)) {
+			ret = PTR_ERR(wc.replay_dest);
+			btrfs_error(fs_info, ret, "Couldn't read target root "
+				    "for tree log recovery.");
+			goto error;
+		}
 
 		wc.replay_dest->log_root = log;
 		btrfs_record_root_in_trans(trans, wc.replay_dest);
@@ -3254,6 +3294,10 @@
 
 	kfree(log_root_tree);
 	return 0;
+
+error:
+	btrfs_free_path(path);
+	return ret;
 }
 
 /*
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 2270ac5..862ac81 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -38,7 +38,7 @@
 			       struct btrfs_root *root,
 			       const char *name, int name_len,
 			       struct inode *inode, u64 dirid);
-int btrfs_end_log_trans(struct btrfs_root *root);
+void btrfs_end_log_trans(struct btrfs_root *root);
 int btrfs_pin_log_trans(struct btrfs_root *root);
 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		    struct btrfs_root *root, struct inode *inode,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ef41f28..a872b48 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -67,7 +67,7 @@
 	kfree(fs_devices);
 }
 
-int btrfs_cleanup_fs_uuids(void)
+void btrfs_cleanup_fs_uuids(void)
 {
 	struct btrfs_fs_devices *fs_devices;
 
@@ -77,7 +77,6 @@
 		list_del(&fs_devices->list);
 		free_fs_devices(fs_devices);
 	}
-	return 0;
 }
 
 static noinline struct btrfs_device *__find_device(struct list_head *head,
@@ -130,7 +129,7 @@
  * the list if the block device is congested.  This way, multiple devices
  * can make progress from a single worker thread.
  */
-static noinline int run_scheduled_bios(struct btrfs_device *device)
+static noinline void run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
@@ -316,7 +315,6 @@
 
 done:
 	blk_finish_plug(&plug);
-	return 0;
 }
 
 static void pending_bios_fn(struct btrfs_work *work)
@@ -455,7 +453,7 @@
 	return ERR_PTR(-ENOMEM);
 }
 
-int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
+void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
 {
 	struct btrfs_device *device, *next;
 
@@ -503,7 +501,6 @@
 	fs_devices->latest_trans = latest_transid;
 
 	mutex_unlock(&uuid_mutex);
-	return 0;
 }
 
 static void __free_device(struct work_struct *work)
@@ -552,10 +549,10 @@
 			fs_devices->num_can_discard--;
 
 		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
-		BUG_ON(!new_device);
+		BUG_ON(!new_device); /* -ENOMEM */
 		memcpy(new_device, device, sizeof(*new_device));
 		new_device->name = kstrdup(device->name, GFP_NOFS);
-		BUG_ON(device->name && !new_device->name);
+		BUG_ON(device->name && !new_device->name); /* -ENOMEM */
 		new_device->bdev = NULL;
 		new_device->writeable = 0;
 		new_device->in_fs_metadata = 0;
@@ -625,6 +622,8 @@
 			printk(KERN_INFO "open %s failed\n", device->name);
 			goto error;
 		}
+		filemap_write_and_wait(bdev->bd_inode->i_mapping);
+		invalidate_bdev(bdev);
 		set_blocksize(bdev, 4096);
 
 		bh = btrfs_read_dev_super(bdev);
@@ -1039,8 +1038,10 @@
 		leaf = path->nodes[0];
 		extent = btrfs_item_ptr(leaf, path->slots[0],
 					struct btrfs_dev_extent);
+	} else {
+		btrfs_error(root->fs_info, ret, "Slot search failed");
+		goto out;
 	}
-	BUG_ON(ret);
 
 	if (device->bytes_used > 0) {
 		u64 len = btrfs_dev_extent_length(leaf, extent);
@@ -1050,7 +1051,10 @@
 		spin_unlock(&root->fs_info->free_chunk_lock);
 	}
 	ret = btrfs_del_item(trans, root, path);
-
+	if (ret) {
+		btrfs_error(root->fs_info, ret,
+			    "Failed to remove dev extent item");
+	}
 out:
 	btrfs_free_path(path);
 	return ret;
@@ -1078,7 +1082,8 @@
 	key.type = BTRFS_DEV_EXTENT_KEY;
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
 				      sizeof(*extent));
-	BUG_ON(ret);
+	if (ret)
+		goto out;
 
 	leaf = path->nodes[0];
 	extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1093,6 +1098,7 @@
 
 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
 	btrfs_mark_buffer_dirty(leaf);
+out:
 	btrfs_free_path(path);
 	return ret;
 }
@@ -1118,7 +1124,7 @@
 	if (ret < 0)
 		goto error;
 
-	BUG_ON(ret == 0);
+	BUG_ON(ret == 0); /* Corruption */
 
 	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
 	if (ret) {
@@ -1162,7 +1168,7 @@
 	if (ret < 0)
 		goto error;
 
-	BUG_ON(ret == 0);
+	BUG_ON(ret == 0); /* Corruption */
 
 	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
 				  BTRFS_DEV_ITEM_KEY);
@@ -1350,6 +1356,7 @@
 		}
 
 		set_blocksize(bdev, 4096);
+		invalidate_bdev(bdev);
 		bh = btrfs_read_dev_super(bdev);
 		if (!bh) {
 			ret = -EINVAL;
@@ -1596,7 +1603,7 @@
 				   (unsigned long)btrfs_device_fsid(dev_item),
 				   BTRFS_UUID_SIZE);
 		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
-		BUG_ON(!device);
+		BUG_ON(!device); /* Logic error */
 
 		if (device->fs_devices->seeding) {
 			btrfs_set_device_generation(leaf, dev_item,
@@ -1706,7 +1713,7 @@
 	if (seeding_dev) {
 		sb->s_flags &= ~MS_RDONLY;
 		ret = btrfs_prepare_sprout(root);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	device->fs_devices = root->fs_info->fs_devices;
@@ -1744,11 +1751,15 @@
 
 	if (seeding_dev) {
 		ret = init_first_rw_device(trans, root, device);
-		BUG_ON(ret);
+		if (ret)
+			goto error_trans;
 		ret = btrfs_finish_sprout(trans, root);
-		BUG_ON(ret);
+		if (ret)
+			goto error_trans;
 	} else {
 		ret = btrfs_add_device(trans, root, device);
+		if (ret)
+			goto error_trans;
 	}
 
 	/*
@@ -1758,17 +1769,31 @@
 	btrfs_clear_space_info_full(root->fs_info);
 
 	unlock_chunks(root);
-	btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans, root);
 
 	if (seeding_dev) {
 		mutex_unlock(&uuid_mutex);
 		up_write(&sb->s_umount);
 
+		if (ret) /* transaction commit */
+			return ret;
+
 		ret = btrfs_relocate_sys_chunks(root);
-		BUG_ON(ret);
+		if (ret < 0)
+			btrfs_error(root->fs_info, ret,
+				    "Failed to relocate sys chunks after "
+				    "device initialization. This can be fixed "
+				    "using the \"btrfs balance\" command.");
 	}
 
 	return ret;
+
+error_trans:
+	unlock_chunks(root);
+	btrfs_abort_transaction(trans, root, ret);
+	btrfs_end_transaction(trans, root);
+	kfree(device->name);
+	kfree(device);
 error:
 	blkdev_put(bdev, FMODE_EXCL);
 	if (seeding_dev) {
@@ -1876,10 +1901,20 @@
 	key.type = BTRFS_CHUNK_ITEM_KEY;
 
 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-	BUG_ON(ret);
+	if (ret < 0)
+		goto out;
+	else if (ret > 0) { /* Logic error or corruption */
+		btrfs_error(root->fs_info, -ENOENT,
+			    "Failed lookup while freeing chunk.");
+		ret = -ENOENT;
+		goto out;
+	}
 
 	ret = btrfs_del_item(trans, root, path);
-
+	if (ret < 0)
+		btrfs_error(root->fs_info, ret,
+			    "Failed to delete chunk item.");
+out:
 	btrfs_free_path(path);
 	return ret;
 }
@@ -2041,7 +2076,7 @@
 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 		if (ret < 0)
 			goto error;
-		BUG_ON(ret == 0);
+		BUG_ON(ret == 0); /* Corruption */
 
 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
 					  key.type);
@@ -2250,15 +2285,13 @@
  * Balance filters.  Return 1 if chunk should be filtered out
  * (should not be balanced).
  */
-static int chunk_profiles_filter(u64 chunk_profile,
+static int chunk_profiles_filter(u64 chunk_type,
 				 struct btrfs_balance_args *bargs)
 {
-	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+	chunk_type = chunk_to_extended(chunk_type) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
-	if (chunk_profile == 0)
-		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
-	if (bargs->profiles & chunk_profile)
+	if (bargs->profiles & chunk_type)
 		return 0;
 
 	return 1;
@@ -2365,18 +2398,16 @@
 	return 1;
 }
 
-static int chunk_soft_convert_filter(u64 chunk_profile,
+static int chunk_soft_convert_filter(u64 chunk_type,
 				     struct btrfs_balance_args *bargs)
 {
 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
 		return 0;
 
-	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+	chunk_type = chunk_to_extended(chunk_type) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
-	if (chunk_profile == 0)
-		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
-	if (bargs->target & chunk_profile)
+	if (bargs->target == chunk_type)
 		return 1;
 
 	return 0;
@@ -2602,6 +2633,30 @@
 	return ret;
 }
 
+/**
+ * alloc_profile_is_valid - see if a given profile is valid and reduced
+ * @flags: profile to validate
+ * @extended: if true @flags is treated as an extended profile
+ */
+static int alloc_profile_is_valid(u64 flags, int extended)
+{
+	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
+			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
+
+	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+	/* 1) check that all other bits are zeroed */
+	if (flags & ~mask)
+		return 0;
+
+	/* 2) see if profile is reduced */
+	if (flags == 0)
+		return !extended; /* "0" is valid for usual profiles */
+
+	/* true if exactly one bit set */
+	return (flags & (flags - 1)) == 0;
+}
+
 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
 {
 	/* cancel requested || normal exit path */
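The single-bit test at the end of alloc_profile_is_valid() above relies on the classic trick that flags & (flags - 1) clears the lowest set bit, so the result is zero exactly when at most one bit was set; together with the flags == 0 case handled just before it, only properly reduced profiles pass. For example, with made-up bit values, 0x8 & 0x7 == 0 (a single profile bit, accepted), while 0xc & 0xb == 0x8 (two bits set, rejected).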
@@ -2630,6 +2685,7 @@
 {
 	struct btrfs_fs_info *fs_info = bctl->fs_info;
 	u64 allowed;
+	int mixed = 0;
 	int ret;
 
 	if (btrfs_fs_closing(fs_info) ||
@@ -2639,13 +2695,16 @@
 		goto out;
 	}
 
+	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
+	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+		mixed = 1;
+
 	/*
 	 * In case of mixed groups both data and meta should be picked,
 	 * and identical options should be given for both of them.
 	 */
-	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
-	if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
-	    (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
+	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
+	if (mixed && (bctl->flags & allowed)) {
 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
@@ -2656,14 +2715,6 @@
 		}
 	}
 
-	/*
-	 * Profile changing sanity checks.  Skip them if a simple
-	 * balance is requested.
-	 */
-	if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
-	      BTRFS_BALANCE_ARGS_CONVERT))
-		goto do_balance;
-
 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
 	if (fs_info->fs_devices->num_devices == 1)
 		allowed |= BTRFS_BLOCK_GROUP_DUP;
@@ -2673,24 +2724,27 @@
 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
 				BTRFS_BLOCK_GROUP_RAID10);
 
-	if (!profile_is_valid(bctl->data.target, 1) ||
-	    bctl->data.target & ~allowed) {
+	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
+	     (bctl->data.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "data profile %llu\n",
 		       (unsigned long long)bctl->data.target);
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!profile_is_valid(bctl->meta.target, 1) ||
-	    bctl->meta.target & ~allowed) {
+	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
+	     (bctl->meta.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "metadata profile %llu\n",
 		       (unsigned long long)bctl->meta.target);
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!profile_is_valid(bctl->sys.target, 1) ||
-	    bctl->sys.target & ~allowed) {
+	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
+	     (bctl->sys.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "system profile %llu\n",
 		       (unsigned long long)bctl->sys.target);
@@ -2698,7 +2752,9 @@
 		goto out;
 	}
 
-	if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
+	/* allow dup'ed data chunks only in mixed mode */
+	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
 		ret = -EINVAL;
 		goto out;
@@ -2724,7 +2780,6 @@
 		}
 	}
 
-do_balance:
 	ret = insert_balance_item(fs_info->tree_root, bctl);
 	if (ret && ret != -EEXIST)
 		goto out;
@@ -2967,7 +3022,7 @@
 	key.offset = (u64)-1;
 	key.type = BTRFS_DEV_EXTENT_KEY;
 
-	while (1) {
+	do {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			goto done;
@@ -3009,8 +3064,7 @@
 			goto done;
 		if (ret == -ENOSPC)
 			failed++;
-		key.offset -= 1;
-	}
+	} while (key.offset-- > 0);
 
 	if (failed && !retried) {
 		failed = 0;
@@ -3128,11 +3182,7 @@
 	int i;
 	int j;
 
-	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
-	    (type & BTRFS_BLOCK_GROUP_DUP)) {
-		WARN_ON(1);
-		type &= ~BTRFS_BLOCK_GROUP_DUP;
-	}
+	BUG_ON(!alloc_profile_is_valid(type, 0));
 
 	if (list_empty(&fs_devices->alloc_list))
 		return -ENOSPC;
@@ -3328,13 +3378,15 @@
 	write_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em);
 	write_unlock(&em_tree->lock);
-	BUG_ON(ret);
 	free_extent_map(em);
+	if (ret)
+		goto error;
 
 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 				     start, num_bytes);
-	BUG_ON(ret);
+	if (ret)
+		goto error;
 
 	for (i = 0; i < map->num_stripes; ++i) {
 		struct btrfs_device *device;
@@ -3347,7 +3399,10 @@
 				info->chunk_root->root_key.objectid,
 				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 				start, dev_offset, stripe_size);
-		BUG_ON(ret);
+		if (ret) {
+			btrfs_abort_transaction(trans, extent_root, ret);
+			goto error;
+		}
 	}
 
 	kfree(devices_info);
@@ -3383,7 +3438,8 @@
 		device = map->stripes[index].dev;
 		device->bytes_used += stripe_size;
 		ret = btrfs_update_device(trans, device);
-		BUG_ON(ret);
+		if (ret)
+			goto out_free;
 		index++;
 	}
 
@@ -3420,16 +3476,19 @@
 	key.offset = chunk_offset;
 
 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
-	BUG_ON(ret);
 
-	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+		/*
+		 * TODO: Cleanup of inserted chunk root in case of
+		 * failure.
+		 */
 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
 					     item_size);
-		BUG_ON(ret);
 	}
 
+out_free:
 	kfree(chunk);
-	return 0;
+	return ret;
 }
 
 /*
@@ -3461,7 +3520,8 @@
 
 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
 				   chunk_size, stripe_size);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 	return 0;
 }
 
@@ -3493,7 +3553,8 @@
 
 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
 				  &stripe_size, chunk_offset, alloc_profile);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
 	sys_chunk_offset = chunk_offset + chunk_size;
 
@@ -3504,10 +3565,12 @@
 	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
 				  &sys_chunk_size, &sys_stripe_size,
 				  sys_chunk_offset, alloc_profile);
-	BUG_ON(ret);
+	if (ret)
+		goto abort;
 
 	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
-	BUG_ON(ret);
+	if (ret)
+		goto abort;
 
 	/*
 	 * Modifying chunk tree needs allocating new blocks from both
@@ -3517,13 +3580,20 @@
 	 */
 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
 				   chunk_size, stripe_size);
-	BUG_ON(ret);
+	if (ret)
+		goto abort;
 
 	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
 				   sys_chunk_offset, sys_chunk_size,
 				   sys_stripe_size);
-	BUG_ON(ret);
+	if (ret)
+		goto abort;
+
 	return 0;
+
+abort:
+	btrfs_abort_transaction(trans, root, ret);
+	return ret;
 }
 
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
@@ -3874,7 +3944,7 @@
 		do_div(length, map->num_stripes);
 
 	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
-	BUG_ON(!buf);
+	BUG_ON(!buf); /* -ENOMEM */
 
 	for (i = 0; i < map->num_stripes; i++) {
 		if (devid && map->stripes[i].dev->devid != devid)
@@ -3967,7 +4037,7 @@
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-static noinline int schedule_bio(struct btrfs_root *root,
+static noinline void schedule_bio(struct btrfs_root *root,
 				 struct btrfs_device *device,
 				 int rw, struct bio *bio)
 {
@@ -3979,7 +4049,7 @@
 		bio_get(bio);
 		btrfsic_submit_bio(rw, bio);
 		bio_put(bio);
-		return 0;
+		return;
 	}
 
 	/*
@@ -4013,7 +4083,6 @@
 	if (should_queue)
 		btrfs_queue_worker(&root->fs_info->submit_workers,
 				   &device->work);
-	return 0;
 }
 
 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
@@ -4036,7 +4105,8 @@
 
 	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
 			      mirror_num);
-	BUG_ON(ret);
+	if (ret) /* -ENOMEM */
+		return ret;
 
 	total_devs = bbio->num_stripes;
 	if (map_length < length) {
@@ -4055,7 +4125,7 @@
 	while (dev_nr < total_devs) {
 		if (dev_nr < total_devs - 1) {
 			bio = bio_clone(first_bio, GFP_NOFS);
-			BUG_ON(!bio);
+			BUG_ON(!bio); /* -ENOMEM */
 		} else {
 			bio = first_bio;
 		}
@@ -4209,13 +4279,13 @@
 	write_lock(&map_tree->map_tree.lock);
 	ret = add_extent_mapping(&map_tree->map_tree, em);
 	write_unlock(&map_tree->map_tree.lock);
-	BUG_ON(ret);
+	BUG_ON(ret); /* Tree corruption */
 	free_extent_map(em);
 
 	return 0;
 }
 
-static int fill_device_from_item(struct extent_buffer *leaf,
+static void fill_device_from_item(struct extent_buffer *leaf,
 				 struct btrfs_dev_item *dev_item,
 				 struct btrfs_device *device)
 {
@@ -4232,8 +4302,6 @@
 
 	ptr = (unsigned long)btrfs_device_uuid(dev_item);
 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
-
-	return 0;
 }
 
 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
@@ -4384,7 +4452,7 @@
 	 * to silence the warning eg. on PowerPC 64.
 	 */
 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
-		SetPageUptodate(sb->first_page);
+		SetPageUptodate(sb->pages[0]);
 
 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
 	array_size = btrfs_super_sys_array_size(super_copy);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 19ac950..bb6b03f 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -260,12 +260,12 @@
 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 			  struct btrfs_fs_devices **fs_devices_ret);
 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
-int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
+void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
 int btrfs_add_device(struct btrfs_trans_handle *trans,
 		     struct btrfs_root *root,
 		     struct btrfs_device *device);
 int btrfs_rm_device(struct btrfs_root *root, char *device_path);
-int btrfs_cleanup_fs_uuids(void);
+void btrfs_cleanup_fs_uuids(void);
 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
 int btrfs_grow_device(struct btrfs_trans_handle *trans,
 		      struct btrfs_device *device, u64 new_size);
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 573b899..2704646 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -58,15 +58,16 @@
 }
 
 #ifdef CONFIG_CIFS_DEBUG2
-void cifs_dump_detail(struct smb_hdr *smb)
+void cifs_dump_detail(void *buf)
 {
+	struct smb_hdr *smb = (struct smb_hdr *)buf;
+
 	cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
 		  smb->Command, smb->Status.CifsError,
 		  smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
 	cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
 }
 
-
 void cifs_dump_mids(struct TCP_Server_Info *server)
 {
 	struct list_head *tmp;
@@ -79,15 +80,15 @@
 	spin_lock(&GlobalMid_Lock);
 	list_for_each(tmp, &server->pending_mid_q) {
 		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-		cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
-			mid_entry->midState,
-			(int)mid_entry->command,
+		cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu",
+			mid_entry->mid_state,
+			le16_to_cpu(mid_entry->command),
 			mid_entry->pid,
 			mid_entry->callback_data,
 			mid_entry->mid);
 #ifdef CONFIG_CIFS_STATS2
 		cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
-			mid_entry->largeBuf,
+			mid_entry->large_buf,
 			mid_entry->resp_buf,
 			mid_entry->when_received,
 			jiffies);
@@ -217,12 +218,12 @@
 				mid_entry = list_entry(tmp3, struct mid_q_entry,
 					qhead);
 				seq_printf(m, "\tState: %d com: %d pid:"
-						" %d cbdata: %p mid %d\n",
-						mid_entry->midState,
-						(int)mid_entry->command,
-						mid_entry->pid,
-						mid_entry->callback_data,
-						mid_entry->mid);
+					      " %d cbdata: %p mid %llu\n",
+					      mid_entry->mid_state,
+					      le16_to_cpu(mid_entry->command),
+					      mid_entry->pid,
+					      mid_entry->callback_data,
+					      mid_entry->mid);
 			}
 			spin_unlock(&GlobalMid_Lock);
 		}
@@ -417,7 +418,6 @@
 
 static struct proc_dir_entry *proc_fs_cifs;
 static const struct file_operations cifsFYI_proc_fops;
-static const struct file_operations cifs_oplock_proc_fops;
 static const struct file_operations cifs_lookup_cache_proc_fops;
 static const struct file_operations traceSMB_proc_fops;
 static const struct file_operations cifs_multiuser_mount_proc_fops;
@@ -438,7 +438,6 @@
 #endif /* STATS */
 	proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
 	proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
-	proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
 	proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
 		    &cifs_linux_ext_proc_fops);
 	proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -462,7 +461,6 @@
 	remove_proc_entry("Stats", proc_fs_cifs);
 #endif
 	remove_proc_entry("MultiuserMount", proc_fs_cifs);
-	remove_proc_entry("OplockEnabled", proc_fs_cifs);
 	remove_proc_entry("SecurityFlags", proc_fs_cifs);
 	remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
 	remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -508,46 +506,6 @@
 	.write		= cifsFYI_proc_write,
 };
 
-static int cifs_oplock_proc_show(struct seq_file *m, void *v)
-{
-	seq_printf(m, "%d\n", enable_oplocks);
-	return 0;
-}
-
-static int cifs_oplock_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, cifs_oplock_proc_show, NULL);
-}
-
-static ssize_t cifs_oplock_proc_write(struct file *file,
-		const char __user *buffer, size_t count, loff_t *ppos)
-{
-	char c;
-	int rc;
-
-	printk(KERN_WARNING "CIFS: The /proc/fs/cifs/OplockEnabled interface "
-	       "will be removed in kernel version 3.4. Please migrate to "
-	       "using the 'enable_oplocks' module parameter in cifs.ko.\n");
-	rc = get_user(c, buffer);
-	if (rc)
-		return rc;
-	if (c == '0' || c == 'n' || c == 'N')
-		enable_oplocks = false;
-	else if (c == '1' || c == 'y' || c == 'Y')
-		enable_oplocks = true;
-
-	return count;
-}
-
-static const struct file_operations cifs_oplock_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= cifs_oplock_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifs_oplock_proc_write,
-};
-
 static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 8942b28..566e0ae 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -26,13 +26,13 @@
 void cifs_dump_mem(char *label, void *data, int length);
 #ifdef CONFIG_CIFS_DEBUG2
 #define DBG2 2
-void cifs_dump_detail(struct smb_hdr *);
+void cifs_dump_detail(void *);
 void cifs_dump_mids(struct TCP_Server_Info *);
 #else
 #define DBG2 0
 #endif
 extern int traceSMB;		/* flag which enables the function below */
-void dump_smb(struct smb_hdr *, int);
+void dump_smb(void *, int);
 #define CIFS_INFO	0x01
 #define CIFS_RC		0x02
 #define CIFS_TIMER	0x04
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index eee522c..d342128 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -85,6 +85,8 @@
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
+struct workqueue_struct	*cifsiod_wq;
+
 static int
 cifs_read_super(struct super_block *sb)
 {
@@ -1111,9 +1113,15 @@
 		cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ);
 	}
 
+	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+	if (!cifsiod_wq) {
+		rc = -ENOMEM;
+		goto out_clean_proc;
+	}
+
 	rc = cifs_fscache_register();
 	if (rc)
-		goto out_clean_proc;
+		goto out_destroy_wq;
 
 	rc = cifs_init_inodecache();
 	if (rc)
@@ -1161,6 +1169,8 @@
 	cifs_destroy_inodecache();
 out_unreg_fscache:
 	cifs_fscache_unregister();
+out_destroy_wq:
+	destroy_workqueue(cifsiod_wq);
 out_clean_proc:
 	cifs_proc_clean();
 	return rc;
@@ -1183,6 +1193,7 @@
 	cifs_destroy_mids();
 	cifs_destroy_inodecache();
 	cifs_fscache_unregister();
+	destroy_workqueue(cifsiod_wq);
 	cifs_proc_clean();
 }
 
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index fe5ecf1..d1389bb 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -125,5 +125,5 @@
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "1.76"
+#define CIFS_VERSION   "1.77"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 339ebe3..4ff6313 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -230,6 +230,12 @@
 	int flags;
 };
 
+static inline unsigned int
+get_rfc1002_length(void *buf)
+{
+	return be32_to_cpu(*((__be32 *)buf));
+}
+
 struct TCP_Server_Info {
 	struct list_head tcp_ses_list;
 	struct list_head smb_ses_list;
@@ -276,7 +282,7 @@
 				   vcnumbers */
 	int capabilities; /* allow selective disabling of caps by smb sess */
 	int timeAdj;  /* Adjust for difference in server time zone in sec */
-	__u16 CurrentMid;         /* multiplex id - rotating counter */
+	__u64 CurrentMid;         /* multiplex id - rotating counter */
 	char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
 	/* 16th byte of RFC1001 workstation name is always null */
 	char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
@@ -335,6 +341,18 @@
 	return num > 0;
 }
 
+static inline size_t
+header_size(void)
+{
+	return sizeof(struct smb_hdr);
+}
+
+static inline size_t
+max_header_size(void)
+{
+	return MAX_CIFS_HDR_SIZE;
+}
+
 /*
  * Macros to allow the TCP_Server_Info->net field and related code to drop out
  * when CONFIG_NET_NS isn't set.
@@ -583,9 +601,11 @@
  * Take a reference on the file private data. Must be called with
  * cifs_file_list_lock held.
  */
-static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+static inline
+struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file)
 {
 	++cifs_file->count;
+	return cifs_file;
 }
 
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
@@ -606,7 +626,7 @@
 	bool delete_pending;		/* DELETE_ON_CLOSE is set */
 	bool invalid_mapping;		/* pagecache is invalid */
 	unsigned long time;		/* jiffies of last update of inode */
-	u64  server_eof;		/* current file size on server */
+	u64  server_eof;		/* current file size on server -- protected by i_lock */
 	u64  uniqueid;			/* server inode number */
 	u64  createtime;		/* creation time on server */
 #ifdef CONFIG_CIFS_FSCACHE
@@ -713,8 +733,8 @@
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
 	struct list_head qhead;	/* mids waiting on reply from this server */
-	__u16 mid;		/* multiplex id */
-	__u16 pid;		/* process id */
+	__u64 mid;		/* multiplex id */
+	__u32 pid;		/* process id */
 	__u32 sequence_number;  /* for CIFS signing */
 	unsigned long when_alloc;  /* when mid was created */
 #ifdef CONFIG_CIFS_STATS2
@@ -724,10 +744,10 @@
 	mid_receive_t *receive; /* call receive callback */
 	mid_callback_t *callback; /* call completion callback */
 	void *callback_data;	  /* general purpose pointer for callback */
-	struct smb_hdr *resp_buf;	/* pointer to received SMB header */
-	int midState;	/* wish this were enum but can not pass to wait_event */
-	__u8 command;	/* smb command code */
-	bool largeBuf:1;	/* if valid response, is pointer to large buf */
+	void *resp_buf;		/* pointer to received SMB header */
+	int mid_state;	/* wish this were enum but can not pass to wait_event */
+	__le16 command;		/* smb command code */
+	bool large_buf:1;	/* if valid response, is pointer to large buf */
 	bool multiRsp:1;	/* multiple trans2 responses for one request  */
 	bool multiEnd:1;	/* both received */
 };
@@ -1052,5 +1072,6 @@
 void cifs_oplock_break(struct work_struct *work);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
+extern struct workqueue_struct *cifsiod_wq;
 
 #endif	/* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 503e73d..96192c1 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -77,7 +77,7 @@
 			struct smb_hdr * /* out */ ,
 			int * /* bytes returned */ , const int long_op);
 extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
-			struct smb_hdr *in_buf, int flags);
+			    char *in_buf, int flags);
 extern int cifs_check_receive(struct mid_q_entry *mid,
 			struct TCP_Server_Info *server, bool log_error);
 extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -91,9 +91,8 @@
 extern void cifs_add_credits(struct TCP_Server_Info *server,
 			     const unsigned int add);
 extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
-extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
-extern bool is_valid_oplock_break(struct smb_hdr *smb,
-				  struct TCP_Server_Info *);
+extern int checkSMB(char *buf, unsigned int length);
+extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
 extern bool backup_cred(struct cifs_sb_info *);
 extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
 extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
@@ -107,7 +106,7 @@
 extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
 extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
 				const unsigned short int port);
-extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr);
+extern int map_smb_to_linux_error(char *buf, bool logErr);
 extern void header_assemble(struct smb_hdr *, char /* command */ ,
 			    const struct cifs_tcon *, int /* length of
 			    fixed section (word count) in two byte units */);
@@ -116,7 +115,7 @@
 				void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
 			     const struct nls_table *nls_cp);
-extern __u16 GetNextMid(struct TCP_Server_Info *server);
+extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
@@ -484,18 +483,25 @@
 /* asynchronous write support */
 struct cifs_writedata {
 	struct kref			refcount;
+	struct list_head		list;
+	struct completion		done;
 	enum writeback_sync_modes	sync_mode;
 	struct work_struct		work;
 	struct cifsFileInfo		*cfile;
 	__u64				offset;
+	pid_t				pid;
 	unsigned int			bytes;
 	int				result;
+	void (*marshal_iov) (struct kvec *iov,
+			     struct cifs_writedata *wdata);
 	unsigned int			nr_pages;
 	struct page			*pages[1];
 };
 
 int cifs_async_writev(struct cifs_writedata *wdata);
-struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages);
+void cifs_writev_complete(struct work_struct *work);
+struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
+						work_func_t complete);
 void cifs_writedata_release(struct kref *refcount);
 
 #endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 70aac35c..f52c5ab 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -696,7 +696,7 @@
 	if (rc)
 		return rc;
 
-	rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0);
 	if (rc)
 		cFYI(1, "Tree disconnect failed %d", rc);
 
@@ -792,7 +792,7 @@
 	pSMB->hdr.Uid = ses->Suid;
 
 	pSMB->AndXCommand = 0xFF;
-	rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0);
 session_already_dead:
 	mutex_unlock(&ses->session_mutex);
 
@@ -1414,8 +1414,7 @@
 static int
 cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
-	READ_RSP *rsp = (READ_RSP *)server->smallbuf;
-	unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
+	unsigned int rfclen = get_rfc1002_length(server->smallbuf);
 	int remaining = rfclen + 4 - server->total_read;
 	struct cifs_readdata *rdata = mid->callback_data;
 
@@ -1424,7 +1423,7 @@
 
 		length = cifs_read_from_socket(server, server->bigbuf,
 				min_t(unsigned int, remaining,
-					CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
+					CIFSMaxBufSize + max_header_size()));
 		if (length < 0)
 			return length;
 		server->total_read += length;
@@ -1435,19 +1434,40 @@
 	return 0;
 }
 
+static inline size_t
+read_rsp_size(void)
+{
+	return sizeof(READ_RSP);
+}
+
+static inline unsigned int
+read_data_offset(char *buf)
+{
+	READ_RSP *rsp = (READ_RSP *)buf;
+	return le16_to_cpu(rsp->DataOffset);
+}
+
+static inline unsigned int
+read_data_length(char *buf)
+{
+	READ_RSP *rsp = (READ_RSP *)buf;
+	return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
+	       le16_to_cpu(rsp->DataLength);
+}
+
 static int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
 	int length, len;
 	unsigned int data_offset, remaining, data_len;
 	struct cifs_readdata *rdata = mid->callback_data;
-	READ_RSP *rsp = (READ_RSP *)server->smallbuf;
-	unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
+	char *buf = server->smallbuf;
+	unsigned int buflen = get_rfc1002_length(buf) + 4;
 	u64 eof;
 	pgoff_t eof_index;
 	struct page *page, *tpage;
 
-	cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
+	cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
 		mid->mid, rdata->offset, rdata->bytes);
 
 	/*
@@ -1455,10 +1475,9 @@
 	 * can if there's not enough data. At this point, we've read down to
 	 * the Mid.
 	 */
-	len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
-			sizeof(struct smb_hdr) + 1;
+	len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
 
-	rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
+	rdata->iov[0].iov_base = buf + header_size() - 1;
 	rdata->iov[0].iov_len = len;
 
 	length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1467,7 +1486,7 @@
 	server->total_read += length;
 
 	/* Was the SMB read successful? */
-	rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
+	rdata->result = map_smb_to_linux_error(buf, false);
 	if (rdata->result != 0) {
 		cFYI(1, "%s: server returned error %d", __func__,
 			rdata->result);
@@ -1475,14 +1494,14 @@
 	}
 
 	/* Is there enough to get to the rest of the READ_RSP header? */
-	if (server->total_read < sizeof(READ_RSP)) {
+	if (server->total_read < read_rsp_size()) {
 		cFYI(1, "%s: server returned short header. got=%u expected=%zu",
-			__func__, server->total_read, sizeof(READ_RSP));
+			__func__, server->total_read, read_rsp_size());
 		rdata->result = -EIO;
 		return cifs_readv_discard(server, mid);
 	}
 
-	data_offset = le16_to_cpu(rsp->DataOffset) + 4;
+	data_offset = read_data_offset(buf) + 4;
 	if (data_offset < server->total_read) {
 		/*
 		 * win2k8 sometimes sends an offset of 0 when the read
@@ -1506,7 +1525,7 @@
 	len = data_offset - server->total_read;
 	if (len > 0) {
 		/* read any junk before data into the rest of smallbuf */
-		rdata->iov[0].iov_base = server->smallbuf + server->total_read;
+		rdata->iov[0].iov_base = buf + server->total_read;
 		rdata->iov[0].iov_len = len;
 		length = cifs_readv_from_socket(server, rdata->iov, 1, len);
 		if (length < 0)
@@ -1515,15 +1534,14 @@
 	}
 
 	/* set up first iov for signature check */
-	rdata->iov[0].iov_base = server->smallbuf;
+	rdata->iov[0].iov_base = buf;
 	rdata->iov[0].iov_len = server->total_read;
 	cFYI(1, "0: iov_base=%p iov_len=%zu",
 		rdata->iov[0].iov_base, rdata->iov[0].iov_len);
 
 	/* how much data is in the response? */
-	data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
-	data_len += le16_to_cpu(rsp->DataLength);
-	if (data_offset + data_len > rfclen) {
+	data_len = read_data_length(buf);
+	if (data_offset + data_len > buflen) {
 		/* data_len is corrupt -- discard frame */
 		rdata->result = -EIO;
 		return cifs_readv_discard(server, mid);
@@ -1602,11 +1620,11 @@
 
 	rdata->bytes = length;
 
-	cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
-		rfclen, remaining);
+	cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
+		buflen, remaining);
 
 	/* discard anything left over */
-	if (server->total_read < rfclen)
+	if (server->total_read < buflen)
 		return cifs_readv_discard(server, mid);
 
 	dequeue_mid(mid, false);
@@ -1647,10 +1665,10 @@
 	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
 	struct TCP_Server_Info *server = tcon->ses->server;
 
-	cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
-		mid->mid, mid->midState, rdata->result, rdata->bytes);
+	cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
+		mid->mid, mid->mid_state, rdata->result, rdata->bytes);
 
-	switch (mid->midState) {
+	switch (mid->mid_state) {
 	case MID_RESPONSE_RECEIVED:
 		/* result already set, check signature */
 		if (server->sec_mode &
@@ -1671,7 +1689,7 @@
 		rdata->result = -EIO;
 	}
 
-	queue_work(system_nrt_wq, &rdata->work);
+	queue_work(cifsiod_wq, &rdata->work);
 	DeleteMidQEntry(mid);
 	cifs_add_credits(server, 1);
 }
@@ -2017,7 +2035,7 @@
 	kref_put(&wdata->refcount, cifs_writedata_release);
 }
 
-static void
+void
 cifs_writev_complete(struct work_struct *work)
 {
 	struct cifs_writedata *wdata = container_of(work,
@@ -2026,7 +2044,9 @@
 	int i = 0;
 
 	if (wdata->result == 0) {
+		spin_lock(&inode->i_lock);
 		cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
+		spin_unlock(&inode->i_lock);
 		cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
 					 wdata->bytes);
 	} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
@@ -2047,7 +2067,7 @@
 }
 
 struct cifs_writedata *
-cifs_writedata_alloc(unsigned int nr_pages)
+cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
 {
 	struct cifs_writedata *wdata;
 
@@ -2061,14 +2081,16 @@
 	wdata = kzalloc(sizeof(*wdata) +
 			sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
 	if (wdata != NULL) {
-		INIT_WORK(&wdata->work, cifs_writev_complete);
 		kref_init(&wdata->refcount);
+		INIT_LIST_HEAD(&wdata->list);
+		init_completion(&wdata->done);
+		INIT_WORK(&wdata->work, complete);
 	}
 	return wdata;
 }
 
 /*
- * Check the midState and signature on received buffer (if any), and queue the
+ * Check the mid_state and signature on received buffer (if any), and queue the
  * workqueue completion task.
  */
 static void
@@ -2079,7 +2101,7 @@
 	unsigned int written;
 	WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
 
-	switch (mid->midState) {
+	switch (mid->mid_state) {
 	case MID_RESPONSE_RECEIVED:
 		wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
 		if (wdata->result != 0)
@@ -2111,7 +2133,7 @@
 		break;
 	}
 
-	queue_work(system_nrt_wq, &wdata->work);
+	queue_work(cifsiod_wq, &wdata->work);
 	DeleteMidQEntry(mid);
 	cifs_add_credits(tcon->ses->server, 1);
 }
@@ -2124,7 +2146,6 @@
 	WRITE_REQ *smb = NULL;
 	int wct;
 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
-	struct inode *inode = wdata->cfile->dentry->d_inode;
 	struct kvec *iov = NULL;
 
 	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
@@ -2148,8 +2169,8 @@
 		goto async_writev_out;
 	}
 
-	smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
-	smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));
+	smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
+	smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
 
 	smb->AndXCommand = 0xFF;	/* none */
 	smb->Fid = wdata->cfile->netfid;
@@ -2167,15 +2188,13 @@
 	iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
 	iov[0].iov_base = smb;
 
-	/* marshal up the pages into iov array */
-	wdata->bytes = 0;
-	for (i = 0; i < wdata->nr_pages; i++) {
-		iov[i + 1].iov_len = min(inode->i_size -
-				      page_offset(wdata->pages[i]),
-					(loff_t)PAGE_CACHE_SIZE);
-		iov[i + 1].iov_base = kmap(wdata->pages[i]);
-		wdata->bytes += iov[i + 1].iov_len;
-	}
+	/*
+	 * This function should marshal up the page array into the kvec
+	 * array, reserving [0] for the header. It should kmap the pages
+	 * and set the iov_len properly for each one. It may also set
+	 * wdata->bytes.
+	 */
+	wdata->marshal_iov(iov, wdata);
 
 	cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
 
@@ -2420,8 +2439,7 @@
 			(struct smb_hdr *) pSMB, &bytes_returned);
 		cifs_small_buf_release(pSMB);
 	} else {
-		rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
-				      timeout);
+		rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, timeout);
 		/* SMB buffer freed by function above */
 	}
 	cifs_stats_inc(&tcon->num_locks);
@@ -2588,7 +2606,7 @@
 	pSMB->FileID = (__u16) smb_file_id;
 	pSMB->LastWriteTime = 0xFFFFFFFF;
 	pSMB->ByteCount = 0;
-	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
 	cifs_stats_inc(&tcon->num_closes);
 	if (rc) {
 		if (rc != -EINTR) {
@@ -2617,7 +2635,7 @@
 
 	pSMB->FileID = (__u16) smb_file_id;
 	pSMB->ByteCount = 0;
-	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
 	cifs_stats_inc(&tcon->num_flushes);
 	if (rc)
 		cERROR(1, "Send error in Flush = %d", rc);
@@ -3874,13 +3892,12 @@
 	int rc = 0;
 	int bytes_returned = 0;
 	SET_SEC_DESC_REQ *pSMB = NULL;
-	NTRANSACT_RSP *pSMBr = NULL;
+	void *pSMBr;
 
 setCifsAclRetry:
-	rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB,
-			(void **) &pSMBr);
+	rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, &pSMBr);
 	if (rc)
-			return (rc);
+		return rc;
 
 	pSMB->MaxSetupCount = 0;
 	pSMB->Reserved = 0;
@@ -3908,9 +3925,8 @@
 	pSMB->AclFlags = cpu_to_le32(aclflag);
 
 	if (pntsd && acllen) {
-		memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
-			(char *) pntsd,
-			acllen);
+		memcpy((char *)pSMBr + offsetof(struct smb_hdr, Protocol) +
+				data_offset, pntsd, acllen);
 		inc_rfc1001_len(pSMB, byte_count + data_count);
 	} else
 		inc_rfc1001_len(pSMB, byte_count);
@@ -4625,7 +4641,7 @@
 
 	pSMB->FileID = searchHandle;
 	pSMB->ByteCount = 0;
-	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
 	if (rc)
 		cERROR(1, "Send error in FindClose = %d", rc);
 
@@ -5646,7 +5662,7 @@
 	pSMB->Reserved4 = 0;
 	inc_rfc1001_len(pSMB, byte_count);
 	pSMB->ByteCount = cpu_to_le16(byte_count);
-	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
 	if (rc) {
 		cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc);
 	}
@@ -5690,7 +5706,8 @@
 	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
 	offset = param_offset + params;
 
-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+	data_offset = (char *)pSMB +
+			offsetof(struct smb_hdr, Protocol) + offset;
 
 	count = sizeof(FILE_BASIC_INFO);
 	pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -5715,7 +5732,7 @@
 	inc_rfc1001_len(pSMB, byte_count);
 	pSMB->ByteCount = cpu_to_le16(byte_count);
 	memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
-	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
 	if (rc)
 		cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
 
@@ -5774,7 +5791,7 @@
 	inc_rfc1001_len(pSMB, byte_count);
 	pSMB->ByteCount = cpu_to_le16(byte_count);
 	*data_offset = delete_file ? 1 : 0;
-	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
 	if (rc)
 		cFYI(1, "Send error in SetFileDisposition = %d", rc);
 
@@ -5959,7 +5976,7 @@
 		       u16 fid, u32 pid_of_opener)
 {
 	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
-	FILE_UNIX_BASIC_INFO *data_offset;
+	char *data_offset;
 	int rc = 0;
 	u16 params, param_offset, offset, byte_count, count;
 
@@ -5981,8 +5998,9 @@
 	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
 	offset = param_offset + params;
 
-	data_offset = (FILE_UNIX_BASIC_INFO *)
-				((char *)(&pSMB->hdr.Protocol) + offset);
+	data_offset = (char *)pSMB +
+			offsetof(struct smb_hdr, Protocol) + offset;
+
 	count = sizeof(FILE_UNIX_BASIC_INFO);
 
 	pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -6004,9 +6022,9 @@
 	inc_rfc1001_len(pSMB, byte_count);
 	pSMB->ByteCount = cpu_to_le16(byte_count);
 
-	cifs_fill_unix_set_info(data_offset, args);
+	cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args);
 
-	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
 	if (rc)
 		cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5560e1d..d81e933 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -40,6 +40,8 @@
 #include <linux/module.h>
 #include <keys/user-type.h>
 #include <net/ipv6.h>
+#include <linux/parser.h>
+
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
@@ -63,6 +65,193 @@
 #define TLINK_ERROR_EXPIRE	(1 * HZ)
 #define TLINK_IDLE_EXPIRE	(600 * HZ)
 
+enum {
+
+	/* Mount options that take no arguments */
+	Opt_user_xattr, Opt_nouser_xattr,
+	Opt_forceuid, Opt_noforceuid,
+	Opt_noblocksend, Opt_noautotune,
+	Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
+	Opt_mapchars, Opt_nomapchars, Opt_sfu,
+	Opt_nosfu, Opt_nodfs, Opt_posixpaths,
+	Opt_noposixpaths, Opt_nounix,
+	Opt_nocase,
+	Opt_brl, Opt_nobrl,
+	Opt_forcemandatorylock, Opt_setuids,
+	Opt_nosetuids, Opt_dynperm, Opt_nodynperm,
+	Opt_nohard, Opt_nosoft,
+	Opt_nointr, Opt_intr,
+	Opt_nostrictsync, Opt_strictsync,
+	Opt_serverino, Opt_noserverino,
+	Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl,
+	Opt_acl, Opt_noacl, Opt_locallease,
+	Opt_sign, Opt_seal, Opt_direct,
+	Opt_strictcache, Opt_noac,
+	Opt_fsc, Opt_mfsymlinks,
+	Opt_multiuser, Opt_sloppy,
+
+	/* Mount options which take numeric value */
+	Opt_backupuid, Opt_backupgid, Opt_uid,
+	Opt_cruid, Opt_gid, Opt_file_mode,
+	Opt_dirmode, Opt_port,
+	Opt_rsize, Opt_wsize, Opt_actimeo,
+
+	/* Mount options which take string value */
+	Opt_user, Opt_pass, Opt_ip,
+	Opt_unc, Opt_domain,
+	Opt_srcaddr, Opt_prefixpath,
+	Opt_iocharset, Opt_sockopt,
+	Opt_netbiosname, Opt_servern,
+	Opt_ver, Opt_sec,
+
+	/* Mount options to be ignored */
+	Opt_ignore,
+
+	/* Options which could be blank */
+	Opt_blank_pass,
+
+	Opt_err
+};
+
+static const match_table_t cifs_mount_option_tokens = {
+
+	{ Opt_user_xattr, "user_xattr" },
+	{ Opt_nouser_xattr, "nouser_xattr" },
+	{ Opt_forceuid, "forceuid" },
+	{ Opt_noforceuid, "noforceuid" },
+	{ Opt_noblocksend, "noblocksend" },
+	{ Opt_noautotune, "noautotune" },
+	{ Opt_hard, "hard" },
+	{ Opt_soft, "soft" },
+	{ Opt_perm, "perm" },
+	{ Opt_noperm, "noperm" },
+	{ Opt_mapchars, "mapchars" },
+	{ Opt_nomapchars, "nomapchars" },
+	{ Opt_sfu, "sfu" },
+	{ Opt_nosfu, "nosfu" },
+	{ Opt_nodfs, "nodfs" },
+	{ Opt_posixpaths, "posixpaths" },
+	{ Opt_noposixpaths, "noposixpaths" },
+	{ Opt_nounix, "nounix" },
+	{ Opt_nounix, "nolinux" },
+	{ Opt_nocase, "nocase" },
+	{ Opt_nocase, "ignorecase" },
+	{ Opt_brl, "brl" },
+	{ Opt_nobrl, "nobrl" },
+	{ Opt_nobrl, "nolock" },
+	{ Opt_forcemandatorylock, "forcemandatorylock" },
+	{ Opt_forcemandatorylock, "forcemand" },
+	{ Opt_setuids, "setuids" },
+	{ Opt_nosetuids, "nosetuids" },
+	{ Opt_dynperm, "dynperm" },
+	{ Opt_nodynperm, "nodynperm" },
+	{ Opt_nohard, "nohard" },
+	{ Opt_nosoft, "nosoft" },
+	{ Opt_nointr, "nointr" },
+	{ Opt_intr, "intr" },
+	{ Opt_nostrictsync, "nostrictsync" },
+	{ Opt_strictsync, "strictsync" },
+	{ Opt_serverino, "serverino" },
+	{ Opt_noserverino, "noserverino" },
+	{ Opt_rwpidforward, "rwpidforward" },
+	{ Opt_cifsacl, "cifsacl" },
+	{ Opt_nocifsacl, "nocifsacl" },
+	{ Opt_acl, "acl" },
+	{ Opt_noacl, "noacl" },
+	{ Opt_locallease, "locallease" },
+	{ Opt_sign, "sign" },
+	{ Opt_seal, "seal" },
+	{ Opt_direct, "direct" },
+	{ Opt_direct, "forceddirectio" },
+	{ Opt_strictcache, "strictcache" },
+	{ Opt_noac, "noac" },
+	{ Opt_fsc, "fsc" },
+	{ Opt_mfsymlinks, "mfsymlinks" },
+	{ Opt_multiuser, "multiuser" },
+	{ Opt_sloppy, "sloppy" },
+
+	{ Opt_backupuid, "backupuid=%s" },
+	{ Opt_backupgid, "backupgid=%s" },
+	{ Opt_uid, "uid=%s" },
+	{ Opt_cruid, "cruid=%s" },
+	{ Opt_gid, "gid=%s" },
+	{ Opt_file_mode, "file_mode=%s" },
+	{ Opt_dirmode, "dirmode=%s" },
+	{ Opt_dirmode, "dir_mode=%s" },
+	{ Opt_port, "port=%s" },
+	{ Opt_rsize, "rsize=%s" },
+	{ Opt_wsize, "wsize=%s" },
+	{ Opt_actimeo, "actimeo=%s" },
+
+	{ Opt_user, "user=%s" },
+	{ Opt_user, "username=%s" },
+	{ Opt_blank_pass, "pass=" },
+	{ Opt_pass, "pass=%s" },
+	{ Opt_pass, "password=%s" },
+	{ Opt_ip, "ip=%s" },
+	{ Opt_ip, "addr=%s" },
+	{ Opt_unc, "unc=%s" },
+	{ Opt_unc, "target=%s" },
+	{ Opt_unc, "path=%s" },
+	{ Opt_domain, "dom=%s" },
+	{ Opt_domain, "domain=%s" },
+	{ Opt_domain, "workgroup=%s" },
+	{ Opt_srcaddr, "srcaddr=%s" },
+	{ Opt_prefixpath, "prefixpath=%s" },
+	{ Opt_iocharset, "iocharset=%s" },
+	{ Opt_sockopt, "sockopt=%s" },
+	{ Opt_netbiosname, "netbiosname=%s" },
+	{ Opt_servern, "servern=%s" },
+	{ Opt_ver, "ver=%s" },
+	{ Opt_ver, "vers=%s" },
+	{ Opt_ver, "version=%s" },
+	{ Opt_sec, "sec=%s" },
+
+	{ Opt_ignore, "cred" },
+	{ Opt_ignore, "credentials" },
+	{ Opt_ignore, "guest" },
+	{ Opt_ignore, "rw" },
+	{ Opt_ignore, "ro" },
+	{ Opt_ignore, "suid" },
+	{ Opt_ignore, "nosuid" },
+	{ Opt_ignore, "exec" },
+	{ Opt_ignore, "noexec" },
+	{ Opt_ignore, "nodev" },
+	{ Opt_ignore, "noauto" },
+	{ Opt_ignore, "dev" },
+	{ Opt_ignore, "mand" },
+	{ Opt_ignore, "nomand" },
+	{ Opt_ignore, "_netdev" },
+
+	{ Opt_err, NULL }
+};
+
+enum {
+	Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
+	Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
+	Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
+	Opt_sec_nontlm, Opt_sec_lanman,
+	Opt_sec_none,
+
+	Opt_sec_err
+};
+
+static const match_table_t cifs_secflavor_tokens = {
+	{ Opt_sec_krb5, "krb5" },
+	{ Opt_sec_krb5i, "krb5i" },
+	{ Opt_sec_krb5p, "krb5p" },
+	{ Opt_sec_ntlmsspi, "ntlmsspi" },
+	{ Opt_sec_ntlmssp, "ntlmssp" },
+	{ Opt_ntlm, "ntlm" },
+	{ Opt_sec_ntlmi, "ntlmi" },
+	{ Opt_sec_ntlmv2i, "ntlmv2i" },
+	{ Opt_sec_nontlm, "nontlm" },
+	{ Opt_sec_lanman, "lanman" },
+	{ Opt_sec_none, "none" },
+
+	{ Opt_sec_err, NULL }
+};
+
 static int ip_connect(struct TCP_Server_Info *server);
 static int generic_ip_connect(struct TCP_Server_Info *server);
 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -143,8 +332,8 @@
 	spin_lock(&GlobalMid_Lock);
 	list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
 		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-		if (mid_entry->midState == MID_REQUEST_SUBMITTED)
-			mid_entry->midState = MID_RETRY_NEEDED;
+		if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
+			mid_entry->mid_state = MID_RETRY_NEEDED;
 		list_move(&mid_entry->qhead, &retry_list);
 	}
 	spin_unlock(&GlobalMid_Lock);
@@ -183,8 +372,9 @@
 		-EINVAL = invalid transact2
 
  */
-static int check2ndT2(struct smb_hdr *pSMB)
+static int check2ndT2(char *buf)
 {
+	struct smb_hdr *pSMB = (struct smb_hdr *)buf;
 	struct smb_t2_rsp *pSMBt;
 	int remaining;
 	__u16 total_data_size, data_in_this_rsp;
@@ -224,10 +414,10 @@
 	return remaining;
 }
 
-static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
+static int coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
 {
-	struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)psecond;
-	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)pTargetSMB;
+	struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
+	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)target_hdr;
 	char *data_area_of_tgt;
 	char *data_area_of_src;
 	int remaining;
@@ -280,23 +470,23 @@
 	put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
 
 	/* fix up the BCC */
-	byte_count = get_bcc(pTargetSMB);
+	byte_count = get_bcc(target_hdr);
 	byte_count += total_in_src;
 	/* is the result too big for the field? */
 	if (byte_count > USHRT_MAX) {
 		cFYI(1, "coalesced BCC too large (%u)", byte_count);
 		return -EPROTO;
 	}
-	put_bcc(byte_count, pTargetSMB);
+	put_bcc(byte_count, target_hdr);
 
-	byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
+	byte_count = be32_to_cpu(target_hdr->smb_buf_length);
 	byte_count += total_in_src;
 	/* don't allow buffer to overflow */
 	if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
 		cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
 		return -ENOBUFS;
 	}
-	pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
+	target_hdr->smb_buf_length = cpu_to_be32(byte_count);
 
 	/* copy second buffer into end of first buffer */
 	memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
@@ -334,7 +524,7 @@
 			server->hostname);
 
 requeue_echo:
-	queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
+	queue_delayed_work(cifsiod_wq, &server->echo, SMB_ECHO_INTERVAL);
 }
 
 static bool
@@ -350,7 +540,7 @@
 		}
 	} else if (server->large_buf) {
 		/* we are reusing a dirty large buf, clear its start */
-		memset(server->bigbuf, 0, sizeof(struct smb_hdr));
+		memset(server->bigbuf, 0, header_size());
 	}
 
 	if (!server->smallbuf) {
@@ -364,7 +554,7 @@
 		/* beginning of smb buffer is cleared in our buf_get */
 	} else {
 		/* if existing small buf clear beginning */
-		memset(server->smallbuf, 0, sizeof(struct smb_hdr));
+		memset(server->smallbuf, 0, header_size());
 	}
 
 	return true;
@@ -566,15 +756,16 @@
 }
 
 static struct mid_q_entry *
-find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf)
+find_mid(struct TCP_Server_Info *server, char *buffer)
 {
+	struct smb_hdr *buf = (struct smb_hdr *)buffer;
 	struct mid_q_entry *mid;
 
 	spin_lock(&GlobalMid_Lock);
 	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
 		if (mid->mid == buf->Mid &&
-		    mid->midState == MID_REQUEST_SUBMITTED &&
-		    mid->command == buf->Command) {
+		    mid->mid_state == MID_REQUEST_SUBMITTED &&
+		    le16_to_cpu(mid->command) == buf->Command) {
 			spin_unlock(&GlobalMid_Lock);
 			return mid;
 		}
@@ -591,16 +782,16 @@
 #endif
 	spin_lock(&GlobalMid_Lock);
 	if (!malformed)
-		mid->midState = MID_RESPONSE_RECEIVED;
+		mid->mid_state = MID_RESPONSE_RECEIVED;
 	else
-		mid->midState = MID_RESPONSE_MALFORMED;
+		mid->mid_state = MID_RESPONSE_MALFORMED;
 	list_del_init(&mid->qhead);
 	spin_unlock(&GlobalMid_Lock);
 }
 
 static void
 handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
-	   struct smb_hdr *buf, int malformed)
+	   char *buf, int malformed)
 {
 	if (malformed == 0 && check2ndT2(buf) > 0) {
 		mid->multiRsp = true;
@@ -620,13 +811,13 @@
 		} else {
 			/* Have first buffer */
 			mid->resp_buf = buf;
-			mid->largeBuf = true;
+			mid->large_buf = true;
 			server->bigbuf = NULL;
 		}
 		return;
 	}
 	mid->resp_buf = buf;
-	mid->largeBuf = server->large_buf;
+	mid->large_buf = server->large_buf;
 	/* Was previous buf put in mpx struct for multi-rsp? */
 	if (!mid->multiRsp) {
 		/* smb buffer will be freed by user thread */
@@ -682,8 +873,8 @@
 		spin_lock(&GlobalMid_Lock);
 		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-			cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
-			mid_entry->midState = MID_SHUTDOWN;
+			cFYI(1, "Clearing mid 0x%llx", mid_entry->mid);
+			mid_entry->mid_state = MID_SHUTDOWN;
 			list_move(&mid_entry->qhead, &dispose_list);
 		}
 		spin_unlock(&GlobalMid_Lock);
@@ -691,7 +882,7 @@
 		/* now walk dispose list and issue callbacks */
 		list_for_each_safe(tmp, tmp2, &dispose_list) {
 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-			cFYI(1, "Callback mid 0x%x", mid_entry->mid);
+			cFYI(1, "Callback mid 0x%llx", mid_entry->mid);
 			list_del_init(&mid_entry->qhead);
 			mid_entry->callback(mid_entry);
 		}
@@ -731,11 +922,10 @@
 {
 	int length;
 	char *buf = server->smallbuf;
-	struct smb_hdr *smb_buffer = (struct smb_hdr *)buf;
-	unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
+	unsigned int pdu_length = get_rfc1002_length(buf);
 
 	/* make sure this will fit in a large buffer */
-	if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+	if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
 		cERROR(1, "SMB response too long (%u bytes)",
 			pdu_length);
 		cifs_reconnect(server);
@@ -746,20 +936,18 @@
 	/* switch to large buffer if too big for a small one */
 	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
 		server->large_buf = true;
-		memcpy(server->bigbuf, server->smallbuf, server->total_read);
+		memcpy(server->bigbuf, buf, server->total_read);
 		buf = server->bigbuf;
-		smb_buffer = (struct smb_hdr *)buf;
 	}
 
 	/* now read the rest */
-	length = cifs_read_from_socket(server,
-			  buf + sizeof(struct smb_hdr) - 1,
-			  pdu_length - sizeof(struct smb_hdr) + 1 + 4);
+	length = cifs_read_from_socket(server, buf + header_size() - 1,
+				       pdu_length - header_size() + 1 + 4);
 	if (length < 0)
 		return length;
 	server->total_read += length;
 
-	dump_smb(smb_buffer, server->total_read);
+	dump_smb(buf, server->total_read);
 
 	/*
 	 * We know that we received enough to get to the MID as we
@@ -770,7 +958,7 @@
 	 * 48 bytes is enough to display the header and a little bit
 	 * into the payload for debugging purposes.
 	 */
-	length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read);
+	length = checkSMB(buf, server->total_read);
 	if (length != 0)
 		cifs_dump_mem("Bad SMB: ", buf,
 			min_t(unsigned int, server->total_read, 48));
@@ -778,7 +966,7 @@
 	if (!mid)
 		return length;
 
-	handle_mid(mid, server, smb_buffer, length);
+	handle_mid(mid, server, buf, length);
 	return 0;
 }
 
@@ -789,7 +977,6 @@
 	struct TCP_Server_Info *server = p;
 	unsigned int pdu_length;
 	char *buf = NULL;
-	struct smb_hdr *smb_buffer = NULL;
 	struct task_struct *task_to_wake = NULL;
 	struct mid_q_entry *mid_entry;
 
@@ -810,7 +997,6 @@
 			continue;
 
 		server->large_buf = false;
-		smb_buffer = (struct smb_hdr *)server->smallbuf;
 		buf = server->smallbuf;
 		pdu_length = 4; /* enough to get RFC1001 header */
 
@@ -823,14 +1009,14 @@
 		 * The right amount was read from socket - 4 bytes,
 		 * so we can now interpret the length field.
 		 */
-		pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
+		pdu_length = get_rfc1002_length(buf);
 
 		cFYI(1, "RFC1002 header 0x%x", pdu_length);
 		if (!is_smb_response(server, buf[0]))
 			continue;
 
 		/* make sure we have enough to get to the MID */
-		if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) {
+		if (pdu_length < header_size() - 1 - 4) {
 			cERROR(1, "SMB response too short (%u bytes)",
 				pdu_length);
 			cifs_reconnect(server);
@@ -840,12 +1026,12 @@
 
 		/* read down to the MID */
 		length = cifs_read_from_socket(server, buf + 4,
-					sizeof(struct smb_hdr) - 1 - 4);
+					       header_size() - 1 - 4);
 		if (length < 0)
 			continue;
 		server->total_read += length;
 
-		mid_entry = find_mid(server, smb_buffer);
+		mid_entry = find_mid(server, buf);
 
 		if (!mid_entry || !mid_entry->receive)
 			length = standard_receive3(server, mid_entry);
@@ -855,22 +1041,19 @@
 		if (length < 0)
 			continue;
 
-		if (server->large_buf) {
+		if (server->large_buf)
 			buf = server->bigbuf;
-			smb_buffer = (struct smb_hdr *)buf;
-		}
 
 		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!is_valid_oplock_break(smb_buffer, server)) {
+		} else if (!is_valid_oplock_break(buf, server)) {
 			cERROR(1, "No task to wake, unknown frame received! "
 				   "NumMids %d", atomic_read(&midCount));
-			cifs_dump_mem("Received Data is: ", buf,
-				      sizeof(struct smb_hdr));
+			cifs_dump_mem("Received Data is: ", buf, header_size());
 #ifdef CONFIG_CIFS_DEBUG2
-			cifs_dump_detail(smb_buffer);
+			cifs_dump_detail(buf);
 			cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
 
@@ -926,23 +1109,95 @@
 	return dst;
 }
 
+static int get_option_ul(substring_t args[], unsigned long *option)
+{
+	int rc;
+	char *string;
+
+	string = match_strdup(args);
+	if (string == NULL)
+		return -ENOMEM;
+	rc = kstrtoul(string, 10, option);
+	kfree(string);
+
+	return rc;
+}
+
+
+static int cifs_parse_security_flavors(char *value,
+				       struct smb_vol *vol)
+{
+
+	substring_t args[MAX_OPT_ARGS];
+
+	switch (match_token(value, cifs_secflavor_tokens, args)) {
+	case Opt_sec_krb5:
+		vol->secFlg |= CIFSSEC_MAY_KRB5;
+		break;
+	case Opt_sec_krb5i:
+		vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
+		break;
+	case Opt_sec_krb5p:
+		/* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */
+		cERROR(1, "Krb5 cifs privacy not supported");
+		break;
+	case Opt_sec_ntlmssp:
+		vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
+		break;
+	case Opt_sec_ntlmsspi:
+		vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN;
+		break;
+	case Opt_ntlm:
+		/* ntlm is default so can be turned off too */
+		vol->secFlg |= CIFSSEC_MAY_NTLM;
+		break;
+	case Opt_sec_ntlmi:
+		vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
+		break;
+	case Opt_sec_nontlm:
+		vol->secFlg |= CIFSSEC_MAY_NTLMV2;
+		break;
+	case Opt_sec_ntlmv2i:
+		vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN;
+		break;
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+	case Opt_sec_lanman:
+		vol->secFlg |= CIFSSEC_MAY_LANMAN;
+		break;
+#endif
+	case Opt_sec_none:
+		vol->nullauth = 1;
+		break;
+	default:
+		cERROR(1, "bad security option: %s", value);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int
 cifs_parse_mount_options(const char *mountdata, const char *devname,
 			 struct smb_vol *vol)
 {
-	char *value, *data, *end;
+	char *data, *end;
 	char *mountdata_copy = NULL, *options;
-	int err;
 	unsigned int  temp_len, i, j;
 	char separator[2];
 	short int override_uid = -1;
 	short int override_gid = -1;
 	bool uid_specified = false;
 	bool gid_specified = false;
+	bool sloppy = false;
+	char *invalid = NULL;
 	char *nodename = utsname()->nodename;
+	char *string = NULL;
+	char *tmp_end, *value;
+	char delim;
 
 	separator[0] = ',';
 	separator[1] = 0;
+	delim = separator[0];
 
 	/*
 	 * does not have to be perfect mapping since field is
@@ -981,6 +1236,7 @@
 
 	options = mountdata_copy;
 	end = options + strlen(options);
+
 	if (strncmp(options, "sep=", 4) == 0) {
 		if (options[4] != 0) {
 			separator[0] = options[4];
@@ -993,609 +1249,651 @@
 	vol->backupgid_specified = false; /* no backup intent for a group */
 
 	while ((data = strsep(&options, separator)) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		unsigned long option;
+		int token;
+
 		if (!*data)
 			continue;
-		if ((value = strchr(data, '=')) != NULL)
-			*value++ = '\0';
 
-		/* Have to parse this before we parse for "user" */
-		if (strnicmp(data, "user_xattr", 10) == 0) {
+		token = match_token(data, cifs_mount_option_tokens, args);
+
+		switch (token) {
+
+		/* Ignore the following */
+		case Opt_ignore:
+			break;
+
+		/* Boolean values */
+		case Opt_user_xattr:
 			vol->no_xattr = 0;
-		} else if (strnicmp(data, "nouser_xattr", 12) == 0) {
+			break;
+		case Opt_nouser_xattr:
 			vol->no_xattr = 1;
-		} else if (strnicmp(data, "user", 4) == 0) {
-			if (!value) {
-				printk(KERN_WARNING
-				       "CIFS: invalid or missing username\n");
-				goto cifs_parse_mount_err;
-			} else if (!*value) {
-				/* null user, ie anonymous, authentication */
-				vol->nullauth = 1;
-			}
-			if (strnlen(value, MAX_USERNAME_SIZE) <
-						MAX_USERNAME_SIZE) {
-				vol->username = kstrdup(value, GFP_KERNEL);
-				if (!vol->username) {
-					printk(KERN_WARNING "CIFS: no memory "
-							    "for username\n");
-					goto cifs_parse_mount_err;
-				}
-			} else {
-				printk(KERN_WARNING "CIFS: username too long\n");
-				goto cifs_parse_mount_err;
-			}
-		} else if (strnicmp(data, "pass", 4) == 0) {
-			if (!value) {
-				vol->password = NULL;
-				continue;
-			} else if (value[0] == 0) {
-				/* check if string begins with double comma
-				   since that would mean the password really
-				   does start with a comma, and would not
-				   indicate an empty string */
-				if (value[1] != separator[0]) {
-					vol->password = NULL;
-					continue;
-				}
-			}
-			temp_len = strlen(value);
-			/* removed password length check, NTLM passwords
-				can be arbitrarily long */
-
-			/* if comma in password, the string will be
-			prematurely null terminated.  Commas in password are
-			specified across the cifs mount interface by a double
-			comma ie ,, and a comma used as in other cases ie ','
-			as a parameter delimiter/separator is single and due
-			to the strsep above is temporarily zeroed. */
-
-			/* NB: password legally can have multiple commas and
-			the only illegal character in a password is null */
-
-			if ((value[temp_len] == 0) &&
-			    (value + temp_len < end) &&
-			    (value[temp_len+1] == separator[0])) {
-				/* reinsert comma */
-				value[temp_len] = separator[0];
-				temp_len += 2;  /* move after second comma */
-				while (value[temp_len] != 0)  {
-					if (value[temp_len] == separator[0]) {
-						if (value[temp_len+1] ==
-						     separator[0]) {
-						/* skip second comma */
-							temp_len++;
-						} else {
-						/* single comma indicating start
-							 of next parm */
-							break;
-						}
-					}
-					temp_len++;
-				}
-				if (value[temp_len] == 0) {
-					options = NULL;
-				} else {
-					value[temp_len] = 0;
-					/* point option to start of next parm */
-					options = value + temp_len + 1;
-				}
-				/* go from value to value + temp_len condensing
-				double commas to singles. Note that this ends up
-				allocating a few bytes too many, which is ok */
-				vol->password = kzalloc(temp_len, GFP_KERNEL);
-				if (vol->password == NULL) {
-					printk(KERN_WARNING "CIFS: no memory "
-							    "for password\n");
-					goto cifs_parse_mount_err;
-				}
-				for (i = 0, j = 0; i < temp_len; i++, j++) {
-					vol->password[j] = value[i];
-					if (value[i] == separator[0]
-						&& value[i+1] == separator[0]) {
-						/* skip second comma */
-						i++;
-					}
-				}
-				vol->password[j] = 0;
-			} else {
-				vol->password = kzalloc(temp_len+1, GFP_KERNEL);
-				if (vol->password == NULL) {
-					printk(KERN_WARNING "CIFS: no memory "
-							    "for password\n");
-					goto cifs_parse_mount_err;
-				}
-				strcpy(vol->password, value);
-			}
-		} else if (!strnicmp(data, "ip", 2) ||
-			   !strnicmp(data, "addr", 4)) {
-			if (!value || !*value) {
-				vol->UNCip = NULL;
-			} else if (strnlen(value, INET6_ADDRSTRLEN) <
-							INET6_ADDRSTRLEN) {
-				vol->UNCip = kstrdup(value, GFP_KERNEL);
-				if (!vol->UNCip) {
-					printk(KERN_WARNING "CIFS: no memory "
-							    "for UNC IP\n");
-					goto cifs_parse_mount_err;
-				}
-			} else {
-				printk(KERN_WARNING "CIFS: ip address "
-						    "too long\n");
-				goto cifs_parse_mount_err;
-			}
-		} else if (strnicmp(data, "sec", 3) == 0) {
-			if (!value || !*value) {
-				cERROR(1, "no security value specified");
-				continue;
-			} else if (strnicmp(value, "krb5i", 5) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_KRB5 |
-					CIFSSEC_MUST_SIGN;
-			} else if (strnicmp(value, "krb5p", 5) == 0) {
-				/* vol->secFlg |= CIFSSEC_MUST_SEAL |
-					CIFSSEC_MAY_KRB5; */
-				cERROR(1, "Krb5 cifs privacy not supported");
-				goto cifs_parse_mount_err;
-			} else if (strnicmp(value, "krb5", 4) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_KRB5;
-			} else if (strnicmp(value, "ntlmsspi", 8) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_NTLMSSP |
-					CIFSSEC_MUST_SIGN;
-			} else if (strnicmp(value, "ntlmssp", 7) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
-			} else if (strnicmp(value, "ntlmv2i", 7) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
-					CIFSSEC_MUST_SIGN;
-			} else if (strnicmp(value, "ntlmv2", 6) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_NTLMV2;
-			} else if (strnicmp(value, "ntlmi", 5) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_NTLM |
-					CIFSSEC_MUST_SIGN;
-			} else if (strnicmp(value, "ntlm", 4) == 0) {
-				/* ntlm is default so can be turned off too */
-				vol->secFlg |= CIFSSEC_MAY_NTLM;
-			} else if (strnicmp(value, "nontlm", 6) == 0) {
-				/* BB is there a better way to do this? */
-				vol->secFlg |= CIFSSEC_MAY_NTLMV2;
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
-			} else if (strnicmp(value, "lanman", 6) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_LANMAN;
-#endif
-			} else if (strnicmp(value, "none", 4) == 0) {
-				vol->nullauth = 1;
-			} else {
-				cERROR(1, "bad security option: %s", value);
-				goto cifs_parse_mount_err;
-			}
-		} else if (strnicmp(data, "vers", 3) == 0) {
-			if (!value || !*value) {
-				cERROR(1, "no protocol version specified"
-					  " after vers= mount option");
-			} else if ((strnicmp(value, "cifs", 4) == 0) ||
-				   (strnicmp(value, "1", 1) == 0)) {
-				/* this is the default */
-				continue;
-			}
-		} else if ((strnicmp(data, "unc", 3) == 0)
-			   || (strnicmp(data, "target", 6) == 0)
-			   || (strnicmp(data, "path", 4) == 0)) {
-			if (!value || !*value) {
-				printk(KERN_WARNING "CIFS: invalid path to "
-						    "network resource\n");
-				goto cifs_parse_mount_err;
-			}
-			if ((temp_len = strnlen(value, 300)) < 300) {
-				vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
-				if (vol->UNC == NULL)
-					goto cifs_parse_mount_err;
-				strcpy(vol->UNC, value);
-				if (strncmp(vol->UNC, "//", 2) == 0) {
-					vol->UNC[0] = '\\';
-					vol->UNC[1] = '\\';
-				} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
-					printk(KERN_WARNING
-					       "CIFS: UNC Path does not begin "
-					       "with // or \\\\ \n");
-					goto cifs_parse_mount_err;
-				}
-			} else {
-				printk(KERN_WARNING "CIFS: UNC name too long\n");
-				goto cifs_parse_mount_err;
-			}
-		} else if ((strnicmp(data, "domain", 3) == 0)
-			   || (strnicmp(data, "workgroup", 5) == 0)) {
-			if (!value || !*value) {
-				printk(KERN_WARNING "CIFS: invalid domain name\n");
-				goto cifs_parse_mount_err;
-			}
-			/* BB are there cases in which a comma can be valid in
-			a domain name and need special handling? */
-			if (strnlen(value, 256) < 256) {
-				vol->domainname = kstrdup(value, GFP_KERNEL);
-				if (!vol->domainname) {
-					printk(KERN_WARNING "CIFS: no memory "
-							    "for domainname\n");
-					goto cifs_parse_mount_err;
-				}
-				cFYI(1, "Domain name set");
-			} else {
-				printk(KERN_WARNING "CIFS: domain name too "
-						    "long\n");
-				goto cifs_parse_mount_err;
-			}
-		} else if (strnicmp(data, "srcaddr", 7) == 0) {
-			vol->srcaddr.ss_family = AF_UNSPEC;
-
-			if (!value || !*value) {
-				printk(KERN_WARNING "CIFS: srcaddr value"
-				       " not specified.\n");
-				goto cifs_parse_mount_err;
-			}
-			i = cifs_convert_address((struct sockaddr *)&vol->srcaddr,
-						 value, strlen(value));
-			if (i == 0) {
-				printk(KERN_WARNING "CIFS:  Could not parse"
-				       " srcaddr: %s\n",
-				       value);
-				goto cifs_parse_mount_err;
-			}
-		} else if (strnicmp(data, "prefixpath", 10) == 0) {
-			if (!value || !*value) {
-				printk(KERN_WARNING
-					"CIFS: invalid path prefix\n");
-				goto cifs_parse_mount_err;
-			}
-			if ((temp_len = strnlen(value, 1024)) < 1024) {
-				if (value[0] != '/')
-					temp_len++;  /* missing leading slash */
-				vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
-				if (vol->prepath == NULL)
-					goto cifs_parse_mount_err;
-				if (value[0] != '/') {
-					vol->prepath[0] = '/';
-					strcpy(vol->prepath+1, value);
-				} else
-					strcpy(vol->prepath, value);
-				cFYI(1, "prefix path %s", vol->prepath);
-			} else {
-				printk(KERN_WARNING "CIFS: prefix too long\n");
-				goto cifs_parse_mount_err;
-			}
-		} else if (strnicmp(data, "iocharset", 9) == 0) {
-			if (!value || !*value) {
-				printk(KERN_WARNING "CIFS: invalid iocharset "
-						    "specified\n");
-				goto cifs_parse_mount_err;
-			}
-			if (strnlen(value, 65) < 65) {
-				if (strnicmp(value, "default", 7)) {
-					vol->iocharset = kstrdup(value,
-								 GFP_KERNEL);
-
-					if (!vol->iocharset) {
-						printk(KERN_WARNING "CIFS: no "
-								   "memory for"
-								   "charset\n");
-						goto cifs_parse_mount_err;
-					}
-				}
-				/* if iocharset not set then load_nls_default
-				   is used by caller */
-				cFYI(1, "iocharset set to %s", value);
-			} else {
-				printk(KERN_WARNING "CIFS: iocharset name "
-						    "too long.\n");
-				goto cifs_parse_mount_err;
-			}
-		} else if (!strnicmp(data, "uid", 3) && value && *value) {
-			vol->linux_uid = simple_strtoul(value, &value, 0);
-			uid_specified = true;
-		} else if (!strnicmp(data, "cruid", 5) && value && *value) {
-			vol->cred_uid = simple_strtoul(value, &value, 0);
-		} else if (!strnicmp(data, "forceuid", 8)) {
+			break;
+		case Opt_forceuid:
 			override_uid = 1;
-		} else if (!strnicmp(data, "noforceuid", 10)) {
+			break;
+		case Opt_noforceuid:
 			override_uid = 0;
-		} else if (!strnicmp(data, "gid", 3) && value && *value) {
-			vol->linux_gid = simple_strtoul(value, &value, 0);
-			gid_specified = true;
-		} else if (!strnicmp(data, "forcegid", 8)) {
-			override_gid = 1;
-		} else if (!strnicmp(data, "noforcegid", 10)) {
-			override_gid = 0;
-		} else if (strnicmp(data, "file_mode", 4) == 0) {
-			if (value && *value) {
-				vol->file_mode =
-					simple_strtoul(value, &value, 0);
-			}
-		} else if (strnicmp(data, "dir_mode", 4) == 0) {
-			if (value && *value) {
-				vol->dir_mode =
-					simple_strtoul(value, &value, 0);
-			}
-		} else if (strnicmp(data, "dirmode", 4) == 0) {
-			if (value && *value) {
-				vol->dir_mode =
-					simple_strtoul(value, &value, 0);
-			}
-		} else if (strnicmp(data, "port", 4) == 0) {
-			if (value && *value) {
-				vol->port =
-					simple_strtoul(value, &value, 0);
-			}
-		} else if (strnicmp(data, "rsize", 5) == 0) {
-			if (value && *value) {
-				vol->rsize =
-					simple_strtoul(value, &value, 0);
-			}
-		} else if (strnicmp(data, "wsize", 5) == 0) {
-			if (value && *value) {
-				vol->wsize =
-					simple_strtoul(value, &value, 0);
-			}
-		} else if (strnicmp(data, "sockopt", 5) == 0) {
-			if (!value || !*value) {
-				cERROR(1, "no socket option specified");
-				continue;
-			} else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
-				vol->sockopt_tcp_nodelay = 1;
-			}
-		} else if (strnicmp(data, "netbiosname", 4) == 0) {
-			if (!value || !*value || (*value == ' ')) {
-				cFYI(1, "invalid (empty) netbiosname");
-			} else {
-				memset(vol->source_rfc1001_name, 0x20,
-					RFC1001_NAME_LEN);
-				/*
-				 * FIXME: are there cases in which a comma can
-				 * be valid in workstation netbios name (and
-				 * need special handling)?
-				 */
-				for (i = 0; i < RFC1001_NAME_LEN; i++) {
-					/* don't ucase netbiosname for user */
-					if (value[i] == 0)
-						break;
-					vol->source_rfc1001_name[i] = value[i];
-				}
-				/* The string has 16th byte zero still from
-				set at top of the function  */
-				if (i == RFC1001_NAME_LEN && value[i] != 0)
-					printk(KERN_WARNING "CIFS: netbiosname"
-						" longer than 15 truncated.\n");
-			}
-		} else if (strnicmp(data, "servern", 7) == 0) {
-			/* servernetbiosname specified override *SMBSERVER */
-			if (!value || !*value || (*value == ' ')) {
-				cFYI(1, "empty server netbiosname specified");
-			} else {
-				/* last byte, type, is 0x20 for servr type */
-				memset(vol->target_rfc1001_name, 0x20,
-					RFC1001_NAME_LEN_WITH_NULL);
-
-				for (i = 0; i < 15; i++) {
-				/* BB are there cases in which a comma can be
-				   valid in this workstation netbios name
-				   (and need special handling)? */
-
-				/* user or mount helper must uppercase
-				   the netbiosname */
-					if (value[i] == 0)
-						break;
-					else
-						vol->target_rfc1001_name[i] =
-								value[i];
-				}
-				/* The string has 16th byte zero still from
-				   set at top of the function  */
-				if (i == RFC1001_NAME_LEN && value[i] != 0)
-					printk(KERN_WARNING "CIFS: server net"
-					"biosname longer than 15 truncated.\n");
-			}
-		} else if (strnicmp(data, "actimeo", 7) == 0) {
-			if (value && *value) {
-				vol->actimeo = HZ * simple_strtoul(value,
-								   &value, 0);
-				if (vol->actimeo > CIFS_MAX_ACTIMEO) {
-					cERROR(1, "CIFS: attribute cache"
-							"timeout too large");
-					goto cifs_parse_mount_err;
-				}
-			}
-		} else if (strnicmp(data, "credentials", 4) == 0) {
-			/* ignore */
-		} else if (strnicmp(data, "version", 3) == 0) {
-			/* ignore */
-		} else if (strnicmp(data, "guest", 5) == 0) {
-			/* ignore */
-		} else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
-			/* ignore */
-		} else if (strnicmp(data, "ro", 2) == 0) {
-			/* ignore */
-		} else if (strnicmp(data, "noblocksend", 11) == 0) {
+			break;
+		case Opt_noblocksend:
 			vol->noblocksnd = 1;
-		} else if (strnicmp(data, "noautotune", 10) == 0) {
+			break;
+		case Opt_noautotune:
 			vol->noautotune = 1;
-		} else if ((strnicmp(data, "suid", 4) == 0) ||
-				   (strnicmp(data, "nosuid", 6) == 0) ||
-				   (strnicmp(data, "exec", 4) == 0) ||
-				   (strnicmp(data, "noexec", 6) == 0) ||
-				   (strnicmp(data, "nodev", 5) == 0) ||
-				   (strnicmp(data, "noauto", 6) == 0) ||
-				   (strnicmp(data, "dev", 3) == 0)) {
-			/*  The mount tool or mount.cifs helper (if present)
-			    uses these opts to set flags, and the flags are read
-			    by the kernel vfs layer before we get here (ie
-			    before read super) so there is no point trying to
-			    parse these options again and set anything and it
-			    is ok to just ignore them */
-			continue;
-		} else if (strnicmp(data, "hard", 4) == 0) {
+			break;
+		case Opt_hard:
 			vol->retry = 1;
-		} else if (strnicmp(data, "soft", 4) == 0) {
+			break;
+		case Opt_soft:
 			vol->retry = 0;
-		} else if (strnicmp(data, "perm", 4) == 0) {
+			break;
+		case Opt_perm:
 			vol->noperm = 0;
-		} else if (strnicmp(data, "noperm", 6) == 0) {
+			break;
+		case Opt_noperm:
 			vol->noperm = 1;
-		} else if (strnicmp(data, "mapchars", 8) == 0) {
+			break;
+		case Opt_mapchars:
 			vol->remap = 1;
-		} else if (strnicmp(data, "nomapchars", 10) == 0) {
+			break;
+		case Opt_nomapchars:
 			vol->remap = 0;
-		} else if (strnicmp(data, "sfu", 3) == 0) {
+			break;
+		case Opt_sfu:
 			vol->sfu_emul = 1;
-		} else if (strnicmp(data, "nosfu", 5) == 0) {
+			break;
+		case Opt_nosfu:
 			vol->sfu_emul = 0;
-		} else if (strnicmp(data, "nodfs", 5) == 0) {
+			break;
+		case Opt_nodfs:
 			vol->nodfs = 1;
-		} else if (strnicmp(data, "posixpaths", 10) == 0) {
+			break;
+		case Opt_posixpaths:
 			vol->posix_paths = 1;
-		} else if (strnicmp(data, "noposixpaths", 12) == 0) {
+			break;
+		case Opt_noposixpaths:
 			vol->posix_paths = 0;
-		} else if (strnicmp(data, "nounix", 6) == 0) {
+			break;
+		case Opt_nounix:
 			vol->no_linux_ext = 1;
-		} else if (strnicmp(data, "nolinux", 7) == 0) {
-			vol->no_linux_ext = 1;
-		} else if ((strnicmp(data, "nocase", 6) == 0) ||
-			   (strnicmp(data, "ignorecase", 10)  == 0)) {
+			break;
+		case Opt_nocase:
 			vol->nocase = 1;
-		} else if (strnicmp(data, "mand", 4) == 0) {
-			/* ignore */
-		} else if (strnicmp(data, "nomand", 6) == 0) {
-			/* ignore */
-		} else if (strnicmp(data, "_netdev", 7) == 0) {
-			/* ignore */
-		} else if (strnicmp(data, "brl", 3) == 0) {
+			break;
+		case Opt_brl:
 			vol->nobrl =  0;
-		} else if ((strnicmp(data, "nobrl", 5) == 0) ||
-			   (strnicmp(data, "nolock", 6) == 0)) {
+			break;
+		case Opt_nobrl:
 			vol->nobrl =  1;
-			/* turn off mandatory locking in mode
-			if remote locking is turned off since the
-			local vfs will do advisory */
+			/*
+			 * turn off mandatory locking in mode
+			 * if remote locking is turned off since the
+			 * local vfs will do advisory
+			 */
 			if (vol->file_mode ==
 				(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
 				vol->file_mode = S_IALLUGO;
-		} else if (strnicmp(data, "forcemandatorylock", 9) == 0) {
-			/* will take the shorter form "forcemand" as well */
-			/* This mount option will force use of mandatory
-			  (DOS/Windows style) byte range locks, instead of
-			  using posix advisory byte range locks, even if the
-			  Unix extensions are available and posix locks would
-			  be supported otherwise. If Unix extensions are not
-			  negotiated this has no effect since mandatory locks
-			  would be used (mandatory locks is all that those
-			  those servers support) */
+			break;
+		case Opt_forcemandatorylock:
 			vol->mand_lock = 1;
-		} else if (strnicmp(data, "setuids", 7) == 0) {
+			break;
+		case Opt_setuids:
 			vol->setuids = 1;
-		} else if (strnicmp(data, "nosetuids", 9) == 0) {
+			break;
+		case Opt_nosetuids:
 			vol->setuids = 0;
-		} else if (strnicmp(data, "dynperm", 7) == 0) {
+			break;
+		case Opt_dynperm:
 			vol->dynperm = true;
-		} else if (strnicmp(data, "nodynperm", 9) == 0) {
+			break;
+		case Opt_nodynperm:
 			vol->dynperm = false;
-		} else if (strnicmp(data, "nohard", 6) == 0) {
+			break;
+		case Opt_nohard:
 			vol->retry = 0;
-		} else if (strnicmp(data, "nosoft", 6) == 0) {
+			break;
+		case Opt_nosoft:
 			vol->retry = 1;
-		} else if (strnicmp(data, "nointr", 6) == 0) {
+			break;
+		case Opt_nointr:
 			vol->intr = 0;
-		} else if (strnicmp(data, "intr", 4) == 0) {
+			break;
+		case Opt_intr:
 			vol->intr = 1;
-		} else if (strnicmp(data, "nostrictsync", 12) == 0) {
+			break;
+		case Opt_nostrictsync:
 			vol->nostrictsync = 1;
-		} else if (strnicmp(data, "strictsync", 10) == 0) {
+			break;
+		case Opt_strictsync:
 			vol->nostrictsync = 0;
-		} else if (strnicmp(data, "serverino", 7) == 0) {
+			break;
+		case Opt_serverino:
 			vol->server_ino = 1;
-		} else if (strnicmp(data, "noserverino", 9) == 0) {
+			break;
+		case Opt_noserverino:
 			vol->server_ino = 0;
-		} else if (strnicmp(data, "rwpidforward", 12) == 0) {
+			break;
+		case Opt_rwpidforward:
 			vol->rwpidforward = 1;
-		} else if (strnicmp(data, "cifsacl", 7) == 0) {
+			break;
+		case Opt_cifsacl:
 			vol->cifs_acl = 1;
-		} else if (strnicmp(data, "nocifsacl", 9) == 0) {
+			break;
+		case Opt_nocifsacl:
 			vol->cifs_acl = 0;
-		} else if (strnicmp(data, "acl", 3) == 0) {
+			break;
+		case Opt_acl:
 			vol->no_psx_acl = 0;
-		} else if (strnicmp(data, "noacl", 5) == 0) {
+			break;
+		case Opt_noacl:
 			vol->no_psx_acl = 1;
-		} else if (strnicmp(data, "locallease", 6) == 0) {
+			break;
+		case Opt_locallease:
 			vol->local_lease = 1;
-		} else if (strnicmp(data, "sign", 4) == 0) {
+			break;
+		case Opt_sign:
 			vol->secFlg |= CIFSSEC_MUST_SIGN;
-		} else if (strnicmp(data, "seal", 4) == 0) {
+			break;
+		case Opt_seal:
 			/* we do not do the following in secFlags because seal
-			   is a per tree connection (mount) not a per socket
-			   or per-smb connection option in the protocol */
-			/* vol->secFlg |= CIFSSEC_MUST_SEAL; */
+			 * is a per tree connection (mount) not a per socket
+			 * or per-smb connection option in the protocol
+			 * vol->secFlg |= CIFSSEC_MUST_SEAL;
+			 */
 			vol->seal = 1;
-		} else if (strnicmp(data, "direct", 6) == 0) {
+			break;
+		case Opt_direct:
 			vol->direct_io = 1;
-		} else if (strnicmp(data, "forcedirectio", 13) == 0) {
-			vol->direct_io = 1;
-		} else if (strnicmp(data, "strictcache", 11) == 0) {
+			break;
+		case Opt_strictcache:
 			vol->strict_io = 1;
-		} else if (strnicmp(data, "noac", 4) == 0) {
+			break;
+		case Opt_noac:
 			printk(KERN_WARNING "CIFS: Mount option noac not "
 				"supported. Instead set "
 				"/proc/fs/cifs/LookupCacheEnabled to 0\n");
-		} else if (strnicmp(data, "fsc", 3) == 0) {
+			break;
+		case Opt_fsc:
 #ifndef CONFIG_CIFS_FSCACHE
 			cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
 				  "kernel config option set");
 			goto cifs_parse_mount_err;
 #endif
 			vol->fsc = true;
-		} else if (strnicmp(data, "mfsymlinks", 10) == 0) {
+			break;
+		case Opt_mfsymlinks:
 			vol->mfsymlinks = true;
-		} else if (strnicmp(data, "multiuser", 8) == 0) {
+			break;
+		case Opt_multiuser:
 			vol->multiuser = true;
-		} else if (!strnicmp(data, "backupuid", 9) && value && *value) {
-			err = kstrtouint(value, 0, &vol->backupuid);
-			if (err < 0) {
+			break;
+		case Opt_sloppy:
+			sloppy = true;
+			break;
+
+		/* Numeric Values */
+		case Opt_backupuid:
+			if (get_option_ul(args, &option)) {
 				cERROR(1, "%s: Invalid backupuid value",
 					__func__);
 				goto cifs_parse_mount_err;
 			}
+			vol->backupuid = option;
 			vol->backupuid_specified = true;
-		} else if (!strnicmp(data, "backupgid", 9) && value && *value) {
-			err = kstrtouint(value, 0, &vol->backupgid);
-			if (err < 0) {
+			break;
+		case Opt_backupgid:
+			if (get_option_ul(args, &option)) {
 				cERROR(1, "%s: Invalid backupgid value",
 					__func__);
 				goto cifs_parse_mount_err;
 			}
+			vol->backupgid = option;
 			vol->backupgid_specified = true;
-		} else
-			printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
-						data);
-	}
-	if (vol->UNC == NULL) {
-		if (devname == NULL) {
-			printk(KERN_WARNING "CIFS: Missing UNC name for mount "
-						"target\n");
-			goto cifs_parse_mount_err;
-		}
-		if ((temp_len = strnlen(devname, 300)) < 300) {
-			vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
-			if (vol->UNC == NULL)
-				goto cifs_parse_mount_err;
-			strcpy(vol->UNC, devname);
-			if (strncmp(vol->UNC, "//", 2) == 0) {
-				vol->UNC[0] = '\\';
-				vol->UNC[1] = '\\';
-			} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
-				printk(KERN_WARNING "CIFS: UNC Path does not "
-						    "begin with // or \\\\ \n");
+			break;
+		case Opt_uid:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid uid value",
+					__func__);
 				goto cifs_parse_mount_err;
 			}
-			value = strpbrk(vol->UNC+2, "/\\");
-			if (value)
-				*value = '\\';
-		} else {
-			printk(KERN_WARNING "CIFS: UNC name too long\n");
+			vol->linux_uid = option;
+			uid_specified = true;
+			break;
+		case Opt_cruid:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid cruid value",
+					__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->cred_uid = option;
+			break;
+		case Opt_gid:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid gid value",
+						__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->linux_gid = option;
+			gid_specified = true;
+			break;
+		case Opt_file_mode:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid file_mode value",
+					__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->file_mode = option;
+			break;
+		case Opt_dirmode:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid dir_mode value",
+					__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->dir_mode = option;
+			break;
+		case Opt_port:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid port value",
+					__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->port = option;
+			break;
+		case Opt_rsize:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid rsize value",
+					__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->rsize = option;
+			break;
+		case Opt_wsize:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid wsize value",
+					__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->wsize = option;
+			break;
+		case Opt_actimeo:
+			if (get_option_ul(args, &option)) {
+				cERROR(1, "%s: Invalid actimeo value",
+					__func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->actimeo = HZ * option;
+			if (vol->actimeo > CIFS_MAX_ACTIMEO) {
+				cERROR(1, "CIFS: attribute cache "
+					  "timeout too large");
+				goto cifs_parse_mount_err;
+			}
+			break;
+
+		/* String Arguments */
+
+		case Opt_user:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				/* null user, i.e. anonymous authentication */
+				vol->nullauth = 1;
+			} else if (strnlen(string, MAX_USERNAME_SIZE) ==
+							MAX_USERNAME_SIZE) {
+				printk(KERN_WARNING "CIFS: username too long\n");
+				goto cifs_parse_mount_err;
+			}
+			vol->username = kstrdup(string, GFP_KERNEL);
+			if (!vol->username) {
+				printk(KERN_WARNING "CIFS: no memory "
+						    "for username\n");
+				goto cifs_parse_mount_err;
+			}
+			break;
+		case Opt_blank_pass:
+			vol->password = NULL;
+			break;
+		case Opt_pass:
+			/* passwords have to be handled differently
+			 * to allow the character used as the delimiter
+			 * to be passed within them
+			 */
+
+			/* Obtain the value string */
+			value = strchr(data, '=');
+			value++;
+
+			/* Set tmp_end to end of the string */
+			tmp_end = (char *) value + strlen(value);
+
+			/* Check if the following character is the delimiter.
+			 * If yes, we have encountered a double delimiter;
+			 * reset the NULL character to the delimiter.
+			 */
+			if (tmp_end < end && tmp_end[1] == delim)
+				tmp_end[0] = delim;
+
+			/* Keep iterating until we get to a single delimiter
+			 * OR the end
+			 */
+			while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
+			       (tmp_end[1] == delim)) {
+				tmp_end = (char *) &tmp_end[2];
+			}
+
+			/* Reset var options to point to next element */
+			if (tmp_end) {
+				tmp_end[0] = '\0';
+				options = (char *) &tmp_end[1];
+			} else
+				/* Reached the end of the mount option string */
+				options = end;
+
+			/* Now build new password string */
+			temp_len = strlen(value);
+			vol->password = kzalloc(temp_len+1, GFP_KERNEL);
+			if (vol->password == NULL) {
+				printk(KERN_WARNING "CIFS: no memory "
+						    "for password\n");
+				goto cifs_parse_mount_err;
+			}
+
+			for (i = 0, j = 0; i < temp_len; i++, j++) {
+				vol->password[j] = value[i];
+				if ((value[i] == delim) &&
+				     value[i+1] == delim)
+					/* skip the second delimiter */
+					i++;
+			}
+			vol->password[j] = '\0';
+			break;
+		case Opt_ip:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				vol->UNCip = NULL;
+			} else if (strnlen(string, INET6_ADDRSTRLEN) ==
+						INET6_ADDRSTRLEN) {
+				printk(KERN_WARNING "CIFS: ip address "
+						    "too long\n");
+				goto cifs_parse_mount_err;
+			}
+			vol->UNCip = kstrdup(string, GFP_KERNEL);
+			if (!vol->UNCip) {
+				printk(KERN_WARNING "CIFS: no memory "
+						    "for UNC IP\n");
+				goto cifs_parse_mount_err;
+			}
+			break;
+		case Opt_unc:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: invalid path to "
+						    "network resource\n");
+				goto cifs_parse_mount_err;
+			}
+
+			temp_len = strnlen(string, 300);
+			if (temp_len == 300) {
+				printk(KERN_WARNING "CIFS: UNC name too long\n");
+				goto cifs_parse_mount_err;
+			}
+
+			vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
+			if (vol->UNC == NULL) {
+				printk(KERN_WARNING "CIFS: no memory for UNC\n");
+				goto cifs_parse_mount_err;
+			}
+			strcpy(vol->UNC, string);
+
+			if (strncmp(string, "//", 2) == 0) {
+				vol->UNC[0] = '\\';
+				vol->UNC[1] = '\\';
+			} else if (strncmp(string, "\\\\", 2) != 0) {
+				printk(KERN_WARNING "CIFS: UNC Path does not "
+						    "begin with // or \\\\\n");
+				goto cifs_parse_mount_err;
+			}
+
+			break;
+		case Opt_domain:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: invalid domain"
+						    " name\n");
+				goto cifs_parse_mount_err;
+			} else if (strnlen(string, 256) == 256) {
+				printk(KERN_WARNING "CIFS: domain name too"
+						    " long\n");
+				goto cifs_parse_mount_err;
+			}
+
+			vol->domainname = kstrdup(string, GFP_KERNEL);
+			if (!vol->domainname) {
+				printk(KERN_WARNING "CIFS: no memory "
+						    "for domainname\n");
+				goto cifs_parse_mount_err;
+			}
+			cFYI(1, "Domain name set");
+			break;
+		case Opt_srcaddr:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: srcaddr value not"
+						    " specified\n");
+				goto cifs_parse_mount_err;
+			} else if (!cifs_convert_address(
+					(struct sockaddr *)&vol->srcaddr,
+					string, strlen(string))) {
+				printk(KERN_WARNING "CIFS:  Could not parse"
+						    " srcaddr: %s\n", string);
+				goto cifs_parse_mount_err;
+			}
+			break;
+		case Opt_prefixpath:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: Invalid path"
+						    " prefix\n");
+				goto cifs_parse_mount_err;
+			}
+			temp_len = strnlen(string, 1024);
+			if (string[0] != '/')
+				temp_len++; /* missing leading slash */
+			if (temp_len > 1024) {
+				printk(KERN_WARNING "CIFS: prefix too long\n");
+				goto cifs_parse_mount_err;
+			}
+
+			vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
+			if (vol->prepath == NULL) {
+				printk(KERN_WARNING "CIFS: no memory "
+						    "for path prefix\n");
+				goto cifs_parse_mount_err;
+			}
+
+			if (string[0] != '/') {
+				vol->prepath[0] = '/';
+				strcpy(vol->prepath+1, string);
+			} else
+				strcpy(vol->prepath, string);
+
+			break;
+		case Opt_iocharset:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: Invalid iocharset"
+						    " specified\n");
+				goto cifs_parse_mount_err;
+			} else if (strnlen(string, 1024) >= 65) {
+				printk(KERN_WARNING "CIFS: iocharset name "
+						    "too long.\n");
+				goto cifs_parse_mount_err;
+			}
+
+			if (strnicmp(string, "default", 7) != 0) {
+				vol->iocharset = kstrdup(string,
+							 GFP_KERNEL);
+				if (!vol->iocharset) {
+					printk(KERN_WARNING "CIFS: no memory "
+							    "for charset\n");
+					goto cifs_parse_mount_err;
+				}
+			}
+			/* if iocharset not set then load_nls_default
+			 * is used by caller
+			 */
+			cFYI(1, "iocharset set to %s", string);
+			break;
+		case Opt_sockopt:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: No socket option"
+						    " specified\n");
+				goto cifs_parse_mount_err;
+			}
+			if (strnicmp(string, "TCP_NODELAY", 11) == 0)
+				vol->sockopt_tcp_nodelay = 1;
+			break;
+		case Opt_netbiosname:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: Invalid (empty)"
+						    " netbiosname\n");
+				break;
+			}
+
+			memset(vol->source_rfc1001_name, 0x20,
+				RFC1001_NAME_LEN);
+			/*
+			 * FIXME: are there cases in which a comma can
+			 * be valid in workstation netbios name (and
+			 * need special handling)?
+			 */
+			for (i = 0; i < RFC1001_NAME_LEN; i++) {
+				/* don't ucase netbiosname for user */
+				if (string[i] == 0)
+					break;
+				vol->source_rfc1001_name[i] = string[i];
+			}
+			/* The 16th byte of the string is still zero from
+			 * the set at the top of the function
+			 */
+			if (i == RFC1001_NAME_LEN && string[i] != 0)
+				printk(KERN_WARNING "CIFS: netbiosname"
+				       " longer than 15 truncated.\n");
+
+			break;
+		case Opt_servern:
+			/* servernetbiosname specified override *SMBSERVER */
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: Empty server"
+					" netbiosname specified\n");
+				break;
+			}
+			/* last byte, type, is 0x20 for server type */
+			memset(vol->target_rfc1001_name, 0x20,
+				RFC1001_NAME_LEN_WITH_NULL);
+
+			/* BB are there cases in which a comma can be
+			   valid in this workstation netbios name
+			   (and need special handling)? */
+
+			/* user or mount helper must uppercase the
+			   netbios name */
+			for (i = 0; i < 15; i++) {
+				if (string[i] == 0)
+					break;
+				vol->target_rfc1001_name[i] = string[i];
+			}
+			/* The 16th byte of the string is still zero from
+			   the set at the top of the function  */
+			if (i == RFC1001_NAME_LEN && string[i] != 0)
+				printk(KERN_WARNING "CIFS: server net"
+				       "biosname longer than 15 truncated.\n");
+			break;
+		case Opt_ver:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				cERROR(1, "no protocol version specified"
+					  " after vers= mount option");
+				goto cifs_parse_mount_err;
+			}
+
+			if (strnicmp(string, "cifs", 4) == 0 ||
+			    strnicmp(string, "1", 1) == 0) {
+				/* This is the default */
+				break;
+			}
+			/* For all other values, error */
+			printk(KERN_WARNING "CIFS: Invalid version"
+					    " specified\n");
 			goto cifs_parse_mount_err;
+		case Opt_sec:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+
+			if (!*string) {
+				printk(KERN_WARNING "CIFS: no security flavor"
+						    " specified\n");
+				break;
+			}
+
+			if (cifs_parse_security_flavors(string, vol) != 0)
+				goto cifs_parse_mount_err;
+			break;
+		default:
+			/*
+			 * An option we don't recognize. Save it off for later
+			 * if we haven't already found one
+			 */
+			if (!invalid)
+				invalid = data;
+			break;
 		}
+		/* Free up any allocated string */
+		kfree(string);
+		string = NULL;
+	}
+
+	if (!sloppy && invalid) {
+		printk(KERN_ERR "CIFS: Unknown mount option \"%s\"\n", invalid);
+		goto cifs_parse_mount_err;
 	}
 
 #ifndef CONFIG_KEYS
@@ -1625,7 +1923,10 @@
 	kfree(mountdata_copy);
 	return 0;
 
+out_nomem:
+	printk(KERN_WARNING "Could not allocate temporary buffer\n");
 cifs_parse_mount_err:
+	kfree(string);
 	kfree(mountdata_copy);
 	return 1;
 }
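
The Opt_pass case above collapses doubled delimiters (",,") into single ones while copying into vol->password. A stand-alone sketch of that unescaping step, with a hypothetical unescape_password() helper that is not part of this patch:

/* Sketch only: collapse doubled delimiters into single ones, as the
 * Opt_pass case does when building vol->password. unescape_password()
 * is a hypothetical helper, not part of the patch. */
static char *unescape_password(const char *value, char delim)
{
	size_t len = strlen(value);
	char *out = kzalloc(len + 1, GFP_KERNEL);
	size_t i, j;

	if (!out)
		return NULL;
	for (i = 0, j = 0; i < len; i++, j++) {
		out[j] = value[i];
		if (value[i] == delim && value[i + 1] == delim)
			i++;	/* skip the second delimiter */
	}
	out[j] = '\0';
	return out;
}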
@@ -1977,7 +2278,7 @@
 	cifs_fscache_get_client_cookie(tcp_ses);
 
 	/* queue echo request delayed work */
-	queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
 
 	return tcp_ses;
 
@@ -3543,7 +3844,7 @@
 	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
 
-	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
 
 mount_fail_check:
@@ -4097,6 +4398,6 @@
 	}
 	spin_unlock(&cifs_sb->tlink_tree_lock);
 
-	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
 }
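
The connect.c changes above replace the long strnicmp() chain with token-based dispatch: the option string is split on commas, each token is matched once, and the result is handled in a switch. A minimal user-space sketch of that pattern; the enum, match_opt() and parse_opts() names are illustrative only, not the kernel's match_token() API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { OPT_HARD, OPT_SOFT, OPT_PORT, OPT_ERR };

/* classify one "name" or "name=value" token */
static int match_opt(const char *tok, unsigned long *val)
{
	if (strcmp(tok, "hard") == 0)
		return OPT_HARD;
	if (strcmp(tok, "soft") == 0)
		return OPT_SOFT;
	if (strncmp(tok, "port=", 5) == 0) {
		*val = strtoul(tok + 5, NULL, 0);
		return OPT_PORT;
	}
	return OPT_ERR;
}

static void parse_opts(char *opts)
{
	unsigned long val = 0;
	char *tok;

	for (tok = strtok(opts, ","); tok; tok = strtok(NULL, ",")) {
		switch (match_opt(tok, &val)) {
		case OPT_HARD:
			printf("retry enabled\n");
			break;
		case OPT_SOFT:
			printf("retry disabled\n");
			break;
		case OPT_PORT:
			printf("port %lu\n", val);
			break;
		default:
			printf("unknown option: %s\n", tok);
			break;
		}
	}
}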
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 159fcc5..fae765d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -835,13 +835,21 @@
 	if ((flock->fl_flags & FL_POSIX) == 0)
 		return rc;
 
+try_again:
 	mutex_lock(&cinode->lock_mutex);
 	if (!cinode->can_cache_brlcks) {
 		mutex_unlock(&cinode->lock_mutex);
 		return rc;
 	}
-	rc = posix_lock_file_wait(file, flock);
+
+	rc = posix_lock_file(file, flock, NULL);
 	mutex_unlock(&cinode->lock_mutex);
+	if (rc == FILE_LOCK_DEFERRED) {
+		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
+		if (!rc)
+			goto try_again;
+		locks_delete_block(flock);
+	}
 	return rc;
 }
 
@@ -1399,7 +1407,10 @@
 	return rc;
 }
 
-/* update the file size (if needed) after a write */
+/*
+ * update the file size (if needed) after a write. Should be called with
+ * the inode->i_lock held
+ */
 void
 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 		      unsigned int bytes_written)
@@ -1471,7 +1482,9 @@
 				return rc;
 			}
 		} else {
+			spin_lock(&dentry->d_inode->i_lock);
 			cifs_update_eof(cifsi, *poffset, bytes_written);
+			spin_unlock(&dentry->d_inode->i_lock);
 			*poffset += bytes_written;
 		}
 	}
@@ -1648,6 +1661,27 @@
 	return rc;
 }
 
+/*
+ * Marshal up the iov array, reserving the first one for the header. Also,
+ * set wdata->bytes.
+ */
+static void
+cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
+{
+	int i;
+	struct inode *inode = wdata->cfile->dentry->d_inode;
+	loff_t size = i_size_read(inode);
+
+	/* marshal up the pages into iov array */
+	wdata->bytes = 0;
+	for (i = 0; i < wdata->nr_pages; i++) {
+		iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
+					(loff_t)PAGE_CACHE_SIZE);
+		iov[i + 1].iov_base = kmap(wdata->pages[i]);
+		wdata->bytes += iov[i + 1].iov_len;
+	}
+}
+
 static int cifs_writepages(struct address_space *mapping,
 			   struct writeback_control *wbc)
 {
@@ -1684,7 +1718,8 @@
 		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
 				end - index) + 1;
 
-		wdata = cifs_writedata_alloc((unsigned int)tofind);
+		wdata = cifs_writedata_alloc((unsigned int)tofind,
+					     cifs_writev_complete);
 		if (!wdata) {
 			rc = -ENOMEM;
 			break;
@@ -1791,6 +1826,7 @@
 		wdata->sync_mode = wbc->sync_mode;
 		wdata->nr_pages = nr_pages;
 		wdata->offset = page_offset(wdata->pages[0]);
+		wdata->marshal_iov = cifs_writepages_marshal_iov;
 
 		do {
 			if (wdata->cfile != NULL)
@@ -1802,6 +1838,7 @@
 				rc = -EBADF;
 				break;
 			}
+			wdata->pid = wdata->cfile->pid;
 			rc = cifs_async_writev(wdata);
 		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
 
@@ -2043,7 +2080,7 @@
 	unsigned long i;
 
 	for (i = 0; i < num_pages; i++) {
-		pages[i] = alloc_page(__GFP_HIGHMEM);
+		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
 		if (!pages[i]) {
 			/*
 			 * save number of pages we have already allocated and
@@ -2051,15 +2088,14 @@
 			 */
 			num_pages = i;
 			rc = -ENOMEM;
-			goto error;
+			break;
 		}
 	}
 
-	return rc;
-
-error:
-	for (i = 0; i < num_pages; i++)
-		put_page(pages[i]);
+	if (rc) {
+		for (i = 0; i < num_pages; i++)
+			put_page(pages[i]);
+	}
 	return rc;
 }
 
@@ -2070,9 +2106,7 @@
 	size_t clen;
 
 	clen = min_t(const size_t, len, wsize);
-	num_pages = clen / PAGE_CACHE_SIZE;
-	if (clen % PAGE_CACHE_SIZE)
-		num_pages++;
+	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
 
 	if (cur_len)
 		*cur_len = clen;
@@ -2080,24 +2114,79 @@
 	return num_pages;
 }
 
+static void
+cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
+{
+	int i;
+	size_t bytes = wdata->bytes;
+
+	/* marshal up the pages into iov array */
+	for (i = 0; i < wdata->nr_pages; i++) {
+		iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
+		iov[i + 1].iov_base = kmap(wdata->pages[i]);
+		bytes -= iov[i + 1].iov_len;
+	}
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
+	int i;
+	struct cifs_writedata *wdata = container_of(work,
+					struct cifs_writedata, work);
+	struct inode *inode = wdata->cfile->dentry->d_inode;
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+	spin_lock(&inode->i_lock);
+	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
+	if (cifsi->server_eof > inode->i_size)
+		i_size_write(inode, cifsi->server_eof);
+	spin_unlock(&inode->i_lock);
+
+	complete(&wdata->done);
+
+	if (wdata->result != -EAGAIN) {
+		for (i = 0; i < wdata->nr_pages; i++)
+			put_page(wdata->pages[i]);
+	}
+
+	kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+/* attempt to send write to server, retry on any -EAGAIN errors */
+static int
+cifs_uncached_retry_writev(struct cifs_writedata *wdata)
+{
+	int rc;
+
+	do {
+		if (wdata->cfile->invalidHandle) {
+			rc = cifs_reopen_file(wdata->cfile, false);
+			if (rc != 0)
+				continue;
+		}
+		rc = cifs_async_writev(wdata);
+	} while (rc == -EAGAIN);
+
+	return rc;
+}
+
 static ssize_t
 cifs_iovec_write(struct file *file, const struct iovec *iov,
 		 unsigned long nr_segs, loff_t *poffset)
 {
-	unsigned int written;
-	unsigned long num_pages, npages, i;
+	unsigned long nr_pages, i;
 	size_t copied, len, cur_len;
 	ssize_t total_written = 0;
-	struct kvec *to_send;
-	struct page **pages;
+	loff_t offset = *poffset;
 	struct iov_iter it;
-	struct inode *inode;
 	struct cifsFileInfo *open_file;
-	struct cifs_tcon *pTcon;
+	struct cifs_tcon *tcon;
 	struct cifs_sb_info *cifs_sb;
-	struct cifs_io_parms io_parms;
-	int xid, rc;
-	__u32 pid;
+	struct cifs_writedata *wdata, *tmp;
+	struct list_head wdata_list;
+	int rc;
+	pid_t pid;
 
 	len = iov_length(iov, nr_segs);
 	if (!len)
@@ -2107,103 +2196,103 @@
 	if (rc)
 		return rc;
 
+	INIT_LIST_HEAD(&wdata_list);
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
-
-	pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
-	if (!to_send) {
-		kfree(pages);
-		return -ENOMEM;
-	}
-
-	rc = cifs_write_allocate_pages(pages, num_pages);
-	if (rc) {
-		kfree(pages);
-		kfree(to_send);
-		return rc;
-	}
-
-	xid = GetXid();
 	open_file = file->private_data;
+	tcon = tlink_tcon(open_file->tlink);
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
 	else
 		pid = current->tgid;
 
-	pTcon = tlink_tcon(open_file->tlink);
-	inode = file->f_path.dentry->d_inode;
-
 	iov_iter_init(&it, iov, nr_segs, len, 0);
-	npages = num_pages;
-
 	do {
-		size_t save_len = cur_len;
-		for (i = 0; i < npages; i++) {
-			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
-			copied = iov_iter_copy_from_user(pages[i], &it, 0,
-							 copied);
-			cur_len -= copied;
-			iov_iter_advance(&it, copied);
-			to_send[i+1].iov_base = kmap(pages[i]);
-			to_send[i+1].iov_len = copied;
-		}
+		size_t save_len;
 
-		cur_len = save_len - cur_len;
-
-		do {
-			if (open_file->invalidHandle) {
-				rc = cifs_reopen_file(open_file, false);
-				if (rc != 0)
-					break;
-			}
-			io_parms.netfid = open_file->netfid;
-			io_parms.pid = pid;
-			io_parms.tcon = pTcon;
-			io_parms.offset = *poffset;
-			io_parms.length = cur_len;
-			rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
-					   npages, 0);
-		} while (rc == -EAGAIN);
-
-		for (i = 0; i < npages; i++)
-			kunmap(pages[i]);
-
-		if (written) {
-			len -= written;
-			total_written += written;
-			cifs_update_eof(CIFS_I(inode), *poffset, written);
-			*poffset += written;
-		} else if (rc < 0) {
-			if (!total_written)
-				total_written = rc;
+		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+		wdata = cifs_writedata_alloc(nr_pages,
+					     cifs_uncached_writev_complete);
+		if (!wdata) {
+			rc = -ENOMEM;
 			break;
 		}
 
-		/* get length and number of kvecs of the next write */
-		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+		if (rc) {
+			kfree(wdata);
+			break;
+		}
+
+		save_len = cur_len;
+		for (i = 0; i < nr_pages; i++) {
+			copied = min_t(const size_t, cur_len, PAGE_SIZE);
+			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
+							 0, copied);
+			cur_len -= copied;
+			iov_iter_advance(&it, copied);
+		}
+		cur_len = save_len - cur_len;
+
+		wdata->sync_mode = WB_SYNC_ALL;
+		wdata->nr_pages = nr_pages;
+		wdata->offset = (__u64)offset;
+		wdata->cfile = cifsFileInfo_get(open_file);
+		wdata->pid = pid;
+		wdata->bytes = cur_len;
+		wdata->marshal_iov = cifs_uncached_marshal_iov;
+		rc = cifs_uncached_retry_writev(wdata);
+		if (rc) {
+			kref_put(&wdata->refcount, cifs_writedata_release);
+			break;
+		}
+
+		list_add_tail(&wdata->list, &wdata_list);
+		offset += cur_len;
+		len -= cur_len;
 	} while (len > 0);
 
-	if (total_written > 0) {
-		spin_lock(&inode->i_lock);
-		if (*poffset > inode->i_size)
-			i_size_write(inode, *poffset);
-		spin_unlock(&inode->i_lock);
+	/*
+	 * If at least one write was successfully sent, then discard any rc
+	 * value from the later writes. If the writes that were sent succeed,
+	 * then we'll end up returning whatever was written. If one of them
+	 * fails, then we'll get a new rc value from that.
+	 */
+	if (!list_empty(&wdata_list))
+		rc = 0;
+
+	/*
+	 * Wait for and collect replies for any successful sends in order of
+	 * increasing offset. Once an error is hit or we get a fatal signal
+	 * while waiting, then return without waiting for any more replies.
+	 */
+restart_loop:
+	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
+		if (!rc) {
+			/* FIXME: freezable too? */
+			rc = wait_for_completion_killable(&wdata->done);
+			if (rc)
+				rc = -EINTR;
+			else if (wdata->result)
+				rc = wdata->result;
+			else
+				total_written += wdata->bytes;
+
+			/* resend call if it's a retryable error */
+			if (rc == -EAGAIN) {
+				rc = cifs_uncached_retry_writev(wdata);
+				goto restart_loop;
+			}
+		}
+		list_del_init(&wdata->list);
+		kref_put(&wdata->refcount, cifs_writedata_release);
 	}
 
-	cifs_stats_bytes_written(pTcon, total_written);
-	mark_inode_dirty_sync(inode);
+	if (total_written > 0)
+		*poffset += total_written;
 
-	for (i = 0; i < num_pages; i++)
-		put_page(pages[i]);
-	kfree(to_send);
-	kfree(pages);
-	FreeXid(xid);
-	return total_written;
+	cifs_stats_bytes_written(tcon, total_written);
+	return total_written ? total_written : (ssize_t)rc;
 }
 
 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
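
cifs_iovec_write() above now queues each chunk as an asynchronous write and then collects the completions in offset order, retrying on -EAGAIN and stopping at the first hard error. A sketch of that collection loop, assuming the kernel list and completion APIs; struct wreq and resend() are stand-ins, not names used by the patch:

/* assumes <linux/list.h>, <linux/completion.h>, <linux/errno.h> */
struct wreq {
	struct list_head list;
	struct completion done;
	int result;		/* 0, -EAGAIN or another error from the send */
	size_t bytes;		/* bytes covered by this request */
};

static ssize_t collect_writes(struct list_head *reqs)
{
	struct wreq *req, *tmp;
	ssize_t written = 0;
	int rc = 0;

restart:
	list_for_each_entry_safe(req, tmp, reqs, list) {
		if (!rc) {
			rc = wait_for_completion_killable(&req->done);
			if (rc)
				rc = -EINTR;
			else if (req->result == -EAGAIN) {
				rc = resend(req); /* hypothetical resend helper */
				goto restart;
			} else if (req->result)
				rc = req->result;
			else
				written += req->bytes;
		}
		/* drop the request whether or not it was counted */
		list_del_init(&req->list);
	}
	return written ? written : rc;
}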
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c273c12..c29d1aa 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -213,55 +213,62 @@
 }
 
 /*
-	Find a free multiplex id (SMB mid). Otherwise there could be
-	mid collisions which might cause problems, demultiplexing the
-	wrong response to this request. Multiplex ids could collide if
-	one of a series requests takes much longer than the others, or
-	if a very large number of long lived requests (byte range
-	locks or FindNotify requests) are pending.  No more than
-	64K-1 requests can be outstanding at one time.  If no
-	mids are available, return zero.  A future optimization
-	could make the combination of mids and uid the key we use
-	to demultiplex on (rather than mid alone).
-	In addition to the above check, the cifs demultiplex
-	code already used the command code as a secondary
-	check of the frame and if signing is negotiated the
-	response would be discarded if the mid were the same
-	but the signature was wrong.  Since the mid is not put in the
-	pending queue until later (when it is about to be dispatched)
-	we do have to limit the number of outstanding requests
-	to somewhat less than 64K-1 although it is hard to imagine
-	so many threads being in the vfs at one time.
-*/
-__u16 GetNextMid(struct TCP_Server_Info *server)
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+__u64 GetNextMid(struct TCP_Server_Info *server)
 {
-	__u16 mid = 0;
-	__u16 last_mid;
+	__u64 mid = 0;
+	__u16 last_mid, cur_mid;
 	bool collision;
 
 	spin_lock(&GlobalMid_Lock);
-	last_mid = server->CurrentMid; /* we do not want to loop forever */
-	server->CurrentMid++;
-	/* This nested loop looks more expensive than it is.
-	In practice the list of pending requests is short,
-	fewer than 50, and the mids are likely to be unique
-	on the first pass through the loop unless some request
-	takes longer than the 64 thousand requests before it
-	(and it would also have to have been a request that
-	 did not time out) */
-	while (server->CurrentMid != last_mid) {
+
+	/* mid is 16 bit only for CIFS/SMB */
+	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+	/* we do not want to loop forever */
+	last_mid = cur_mid;
+	cur_mid++;
+
+	/*
+	 * This nested loop looks more expensive than it is.
+	 * In practice the list of pending requests is short,
+	 * fewer than 50, and the mids are likely to be unique
+	 * on the first pass through the loop unless some request
+	 * takes longer than the 64 thousand requests before it
+	 * (and it would also have to have been a request that
+	 * did not time out).
+	 */
+	while (cur_mid != last_mid) {
 		struct mid_q_entry *mid_entry;
 		unsigned int num_mids;
 
 		collision = false;
-		if (server->CurrentMid == 0)
-			server->CurrentMid++;
+		if (cur_mid == 0)
+			cur_mid++;
 
 		num_mids = 0;
 		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
 			++num_mids;
-			if (mid_entry->mid == server->CurrentMid &&
-			    mid_entry->midState == MID_REQUEST_SUBMITTED) {
+			if (mid_entry->mid == cur_mid &&
+			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
 				/* This mid is in use, try a different one */
 				collision = true;
 				break;
@@ -282,10 +289,11 @@
 			server->tcpStatus = CifsNeedReconnect;
 
 		if (!collision) {
-			mid = server->CurrentMid;
+			mid = (__u64)cur_mid;
+			server->CurrentMid = mid;
 			break;
 		}
-		server->CurrentMid++;
+		cur_mid++;
 	}
 	spin_unlock(&GlobalMid_Lock);
 	return mid;
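
GetNextMid() above performs a bounded wraparound scan over the 16-bit mid space, skipping mid 0 and any value still on the pending queue. The search itself reduces to the following sketch, where in_use() stands in for the walk over server->pending_mid_q done under GlobalMid_Lock:

/* Stand-alone sketch of the wraparound mid search; in_use() is a stand-in
 * for the pending_mid_q walk, not a function from this patch. */
static __u16 next_free_mid(__u16 cur, bool (*in_use)(__u16 mid))
{
	__u16 last = cur;	/* remember the start so we do not loop forever */

	for (cur++; cur != last; cur++) {
		if (cur == 0)	/* mid 0 is never handed out */
			continue;
		if (!in_use(cur))
			return cur;
	}
	return 0;		/* every mid is busy */
}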
@@ -420,8 +428,10 @@
 }
 
 int
-checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
+checkSMB(char *buf, unsigned int total_read)
 {
+	struct smb_hdr *smb = (struct smb_hdr *)buf;
+	__u16 mid = smb->Mid;
 	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
 	__u32 clc_len;  /* calculated length */
 	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
@@ -502,8 +512,9 @@
 }
 
 bool
-is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
+is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
 {
+	struct smb_hdr *buf = (struct smb_hdr *)buffer;
 	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
 	struct list_head *tmp, *tmp1, *tmp2;
 	struct cifs_ses *ses;
@@ -584,7 +595,7 @@
 
 				cifs_set_oplock_level(pCifsInode,
 					pSMB->OplockLevel ? OPLOCK_READ : 0);
-				queue_work(system_nrt_wq,
+				queue_work(cifsiod_wq,
 					   &netfile->oplock_break);
 				netfile->oplock_break_cancelled = false;
 
@@ -604,16 +615,15 @@
 }
 
 void
-dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
+dump_smb(void *buf, int smb_buf_length)
 {
 	int i, j;
 	char debug_line[17];
-	unsigned char *buffer;
+	unsigned char *buffer = buf;
 
 	if (traceSMB == 0)
 		return;
 
-	buffer = (unsigned char *) smb_buf;
 	for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
 		if (i % 8 == 0) {
 			/* have reached the beginning of line */
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 73e47e8..581c225 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -197,8 +197,7 @@
 		memcpy(scope_id, pct + 1, slen);
 		scope_id[slen] = '\0';
 
-		rc = strict_strtoul(scope_id, 0,
-					(unsigned long *)&s6->sin6_scope_id);
+		rc = kstrtouint(scope_id, 0, &s6->sin6_scope_id);
 		rc = (rc == 0) ? 1 : 0;
 	}
 
@@ -836,8 +835,9 @@
 }
 
 int
-map_smb_to_linux_error(struct smb_hdr *smb, bool logErr)
+map_smb_to_linux_error(char *buf, bool logErr)
 {
+	struct smb_hdr *smb = (struct smb_hdr *)buf;
 	unsigned int i;
 	int rc = -EIO;	/* if transport error smb error may not be set */
 	__u8 smberrclass;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 310918b..0961336 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -60,8 +60,8 @@
 		memset(temp, 0, sizeof(struct mid_q_entry));
 		temp->mid = smb_buffer->Mid;	/* always LE */
 		temp->pid = current->pid;
-		temp->command = smb_buffer->Command;
-		cFYI(1, "For smb_command %d", temp->command);
+		temp->command = cpu_to_le16(smb_buffer->Command);
+		cFYI(1, "For smb_command %d", smb_buffer->Command);
 	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 		/* when mid allocated can be before when sent */
 		temp->when_alloc = jiffies;
@@ -75,7 +75,7 @@
 	}
 
 	atomic_inc(&midCount);
-	temp->midState = MID_REQUEST_ALLOCATED;
+	temp->mid_state = MID_REQUEST_ALLOCATED;
 	return temp;
 }
 
@@ -85,9 +85,9 @@
 #ifdef CONFIG_CIFS_STATS2
 	unsigned long now;
 #endif
-	midEntry->midState = MID_FREE;
+	midEntry->mid_state = MID_FREE;
 	atomic_dec(&midCount);
-	if (midEntry->largeBuf)
+	if (midEntry->large_buf)
 		cifs_buf_release(midEntry->resp_buf);
 	else
 		cifs_small_buf_release(midEntry->resp_buf);
@@ -97,8 +97,8 @@
 	   something is wrong, unless it is quite a slow link or server */
 	if ((now - midEntry->when_alloc) > HZ) {
 		if ((cifsFYI & CIFS_TIMER) &&
-		   (midEntry->command != SMB_COM_LOCKING_ANDX)) {
-			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
+		    (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
+			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
 			       midEntry->command, midEntry->mid);
 			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
 			       now - midEntry->when_alloc,
@@ -126,11 +126,11 @@
 	int rc = 0;
 	int i = 0;
 	struct msghdr smb_msg;
-	struct smb_hdr *smb_buffer = iov[0].iov_base;
+	__be32 *buf_len = (__be32 *)(iov[0].iov_base);
 	unsigned int len = iov[0].iov_len;
 	unsigned int total_len;
 	int first_vec = 0;
-	unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
+	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
 	struct socket *ssocket = server->ssocket;
 
 	if (ssocket == NULL)
@@ -150,7 +150,7 @@
 		total_len += iov[i].iov_len;
 
 	cFYI(1, "Sending smb:  total_len %d", total_len);
-	dump_smb(smb_buffer, len);
+	dump_smb(iov[0].iov_base, len);
 
 	i = 0;
 	while (total_len) {
@@ -158,24 +158,24 @@
 				    n_vec - first_vec, total_len);
 		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
 			i++;
-			/* if blocking send we try 3 times, since each can block
-			   for 5 seconds. For nonblocking  we have to try more
-			   but wait increasing amounts of time allowing time for
-			   socket to clear.  The overall time we wait in either
-			   case to send on the socket is about 15 seconds.
-			   Similarly we wait for 15 seconds for
-			   a response from the server in SendReceive[2]
-			   for the server to send a response back for
-			   most types of requests (except SMB Write
-			   past end of file which can be slow, and
-			   blocking lock operations). NFS waits slightly longer
-			   than CIFS, but this can make it take longer for
-			   nonresponsive servers to be detected and 15 seconds
-			   is more than enough time for modern networks to
-			   send a packet.  In most cases if we fail to send
-			   after the retries we will kill the socket and
-			   reconnect which may clear the network problem.
-			*/
+			/*
+			 * If blocking send we try 3 times, since each can block
+			 * for 5 seconds. For nonblocking  we have to try more
+			 * but wait increasing amounts of time allowing time for
+			 * socket to clear.  The overall time we wait in either
+			 * case to send on the socket is about 15 seconds.
+			 * Similarly we wait for 15 seconds for a response from
+			 * the server in SendReceive[2] for the server to send
+			 * a response back for most types of requests (except
+			 * SMB Write past end of file which can be slow, and
+			 * blocking lock operations). NFS waits slightly longer
+			 * than CIFS, but this can make it take longer for
+			 * nonresponsive servers to be detected and 15 seconds
+			 * is more than enough time for modern networks to
+			 * send a packet.  In most cases if we fail to send
+			 * after the retries we will kill the socket and
+			 * reconnect which may clear the network problem.
+			 */
 			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
 				cERROR(1, "sends on sock %p stuck for 15 seconds",
 				    ssocket);
@@ -235,9 +235,8 @@
 	else
 		rc = 0;
 
-	/* Don't want to modify the buffer as a
-	   side effect of this call. */
-	smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);
+	/* Don't want to modify the buffer as a side effect of this call. */
+	*buf_len = cpu_to_be32(smb_buf_length);
 
 	return rc;
 }
@@ -342,13 +341,40 @@
 	int error;
 
 	error = wait_event_freezekillable(server->response_q,
-				    midQ->midState != MID_REQUEST_SUBMITTED);
+				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 	if (error < 0)
 		return -ERESTARTSYS;
 
 	return 0;
 }
 
+static int
+cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
+			 unsigned int nvec, struct mid_q_entry **ret_mid)
+{
+	int rc;
+	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+	struct mid_q_entry *mid;
+
+	/* enable signing if server requires it */
+	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	mid = AllocMidQEntry(hdr, server);
+	if (mid == NULL)
+		return -ENOMEM;
+
+	/* put it on the pending_mid_q */
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&mid->qhead, &server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
+
+	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
+	if (rc)
+		delete_mid(mid);
+	*ret_mid = mid;
+	return rc;
+}
 
 /*
  * Send a SMB request and set the callback function in the mid to handle
@@ -361,40 +387,24 @@
 {
 	int rc;
 	struct mid_q_entry *mid;
-	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
 
 	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
 	if (rc)
 		return rc;
 
-	/* enable signing if server requires it */
-	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
 	mutex_lock(&server->srv_mutex);
-	mid = AllocMidQEntry(hdr, server);
-	if (mid == NULL) {
+	rc = cifs_setup_async_request(server, iov, nvec, &mid);
+	if (rc) {
 		mutex_unlock(&server->srv_mutex);
 		cifs_add_credits(server, 1);
 		wake_up(&server->request_q);
-		return -ENOMEM;
-	}
-
-	/* put it on the pending_mid_q */
-	spin_lock(&GlobalMid_Lock);
-	list_add_tail(&mid->qhead, &server->pending_mid_q);
-	spin_unlock(&GlobalMid_Lock);
-
-	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
-	if (rc) {
-		mutex_unlock(&server->srv_mutex);
-		goto out_err;
+		return rc;
 	}
 
 	mid->receive = receive;
 	mid->callback = callback;
 	mid->callback_data = cbdata;
-	mid->midState = MID_REQUEST_SUBMITTED;
+	mid->mid_state = MID_REQUEST_SUBMITTED;
 
 	cifs_in_send_inc(server);
 	rc = smb_sendv(server, iov, nvec);
@@ -424,14 +434,14 @@
  */
 int
 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
-		struct smb_hdr *in_buf, int flags)
+		 char *in_buf, int flags)
 {
 	int rc;
 	struct kvec iov[1];
 	int resp_buf_type;
 
-	iov[0].iov_base = (char *)in_buf;
-	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
+	iov[0].iov_base = in_buf;
+	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
 	flags |= CIFS_NO_RESP;
 	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
 	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
@@ -444,11 +454,11 @@
 {
 	int rc = 0;
 
-	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
-		mid->mid, mid->midState);
+	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
+	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);
 
 	spin_lock(&GlobalMid_Lock);
-	switch (mid->midState) {
+	switch (mid->mid_state) {
 	case MID_RESPONSE_RECEIVED:
 		spin_unlock(&GlobalMid_Lock);
 		return rc;
@@ -463,8 +473,8 @@
 		break;
 	default:
 		list_del_init(&mid->qhead);
-		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
-			mid->mid, mid->midState);
+		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
+		       mid->mid, mid->mid_state);
 		rc = -EIO;
 	}
 	spin_unlock(&GlobalMid_Lock);
@@ -514,7 +524,7 @@
 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 		   bool log_error)
 {
-	unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;
+	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
 
 	dump_smb(mid->resp_buf, min_t(u32, 92, len));
 
@@ -534,6 +544,24 @@
 	return map_smb_to_linux_error(mid->resp_buf, log_error);
 }
 
+static int
+cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
+		   unsigned int nvec, struct mid_q_entry **ret_mid)
+{
+	int rc;
+	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+	struct mid_q_entry *mid;
+
+	rc = allocate_mid(ses, hdr, &mid);
+	if (rc)
+		return rc;
+	rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
+	if (rc)
+		delete_mid(mid);
+	*ret_mid = mid;
+	return rc;
+}
+
 int
 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -542,55 +570,53 @@
 	int rc = 0;
 	int long_op;
 	struct mid_q_entry *midQ;
-	struct smb_hdr *in_buf = iov[0].iov_base;
+	char *buf = iov[0].iov_base;
 
 	long_op = flags & CIFS_TIMEOUT_MASK;
 
 	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */
 
 	if ((ses == NULL) || (ses->server == NULL)) {
-		cifs_small_buf_release(in_buf);
+		cifs_small_buf_release(buf);
 		cERROR(1, "Null session");
 		return -EIO;
 	}
 
 	if (ses->server->tcpStatus == CifsExiting) {
-		cifs_small_buf_release(in_buf);
+		cifs_small_buf_release(buf);
 		return -ENOENT;
 	}
 
-	/* Ensure that we do not send more than 50 overlapping requests
-	   to the same server. We may make this configurable later or
-	   use ses->maxReq */
+	/*
+	 * Ensure that we do not send more than 50 overlapping requests
+	 * to the same server. We may make this configurable later or
+	 * use ses->maxReq.
+	 */
 
 	rc = wait_for_free_request(ses->server, long_op);
 	if (rc) {
-		cifs_small_buf_release(in_buf);
+		cifs_small_buf_release(buf);
 		return rc;
 	}
 
-	/* make sure that we sign in the same order that we send on this socket
-	   and avoid races inside tcp sendmsg code that could cause corruption
-	   of smb data */
+	/*
+	 * Make sure that we sign in the same order that we send on this socket
+	 * and avoid races inside tcp sendmsg code that could cause corruption
+	 * of smb data.
+	 */
 
 	mutex_lock(&ses->server->srv_mutex);
 
-	rc = allocate_mid(ses, in_buf, &midQ);
+	rc = cifs_setup_request(ses, iov, n_vec, &midQ);
 	if (rc) {
 		mutex_unlock(&ses->server->srv_mutex);
-		cifs_small_buf_release(in_buf);
+		cifs_small_buf_release(buf);
 		/* Update # of requests on wire to server */
 		cifs_add_credits(ses->server, 1);
 		return rc;
 	}
-	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
-	if (rc) {
-		mutex_unlock(&ses->server->srv_mutex);
-		cifs_small_buf_release(in_buf);
-		goto out;
-	}
 
-	midQ->midState = MID_REQUEST_SUBMITTED;
+	midQ->mid_state = MID_REQUEST_SUBMITTED;
 	cifs_in_send_inc(ses->server);
 	rc = smb_sendv(ses->server, iov, n_vec);
 	cifs_in_send_dec(ses->server);
@@ -599,30 +625,30 @@
 	mutex_unlock(&ses->server->srv_mutex);
 
 	if (rc < 0) {
-		cifs_small_buf_release(in_buf);
+		cifs_small_buf_release(buf);
 		goto out;
 	}
 
 	if (long_op == CIFS_ASYNC_OP) {
-		cifs_small_buf_release(in_buf);
+		cifs_small_buf_release(buf);
 		goto out;
 	}
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
-		send_nt_cancel(ses->server, in_buf, midQ);
+		send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
 		spin_lock(&GlobalMid_Lock);
-		if (midQ->midState == MID_REQUEST_SUBMITTED) {
+		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
-			cifs_small_buf_release(in_buf);
+			cifs_small_buf_release(buf);
 			cifs_add_credits(ses->server, 1);
 			return rc;
 		}
 		spin_unlock(&GlobalMid_Lock);
 	}
 
-	cifs_small_buf_release(in_buf);
+	cifs_small_buf_release(buf);
 
 	rc = cifs_sync_mid_result(midQ, ses->server);
 	if (rc != 0) {
@@ -630,15 +656,16 @@
 		return rc;
 	}
 
-	if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
+	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
 		rc = -EIO;
 		cFYI(1, "Bad MID state?");
 		goto out;
 	}
 
-	iov[0].iov_base = (char *)midQ->resp_buf;
-	iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
-	if (midQ->largeBuf)
+	buf = (char *)midQ->resp_buf;
+	iov[0].iov_base = buf;
+	iov[0].iov_len = get_rfc1002_length(buf) + 4;
+	if (midQ->large_buf)
 		*pRespBufType = CIFS_LARGE_BUFFER;
 	else
 		*pRespBufType = CIFS_SMALL_BUFFER;
@@ -710,7 +737,7 @@
 		goto out;
 	}
 
-	midQ->midState = MID_REQUEST_SUBMITTED;
+	midQ->mid_state = MID_REQUEST_SUBMITTED;
 
 	cifs_in_send_inc(ses->server);
 	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
@@ -728,7 +755,7 @@
 	if (rc != 0) {
 		send_nt_cancel(ses->server, in_buf, midQ);
 		spin_lock(&GlobalMid_Lock);
-		if (midQ->midState == MID_REQUEST_SUBMITTED) {
+		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 			/* no longer considered to be "in-flight" */
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
@@ -745,13 +772,13 @@
 	}
 
 	if (!midQ->resp_buf || !out_buf ||
-	    midQ->midState != MID_RESPONSE_RECEIVED) {
+	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
 		rc = -EIO;
 		cERROR(1, "Bad MID state?");
 		goto out;
 	}
 
-	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
 	rc = cifs_check_receive(midQ, ses->server, 0);
 out:
@@ -844,7 +871,7 @@
 		return rc;
 	}
 
-	midQ->midState = MID_REQUEST_SUBMITTED;
+	midQ->mid_state = MID_REQUEST_SUBMITTED;
 	cifs_in_send_inc(ses->server);
 	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 	cifs_in_send_dec(ses->server);
@@ -858,13 +885,13 @@
 
 	/* Wait for a reply - allow signals to interrupt. */
 	rc = wait_event_interruptible(ses->server->response_q,
-		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
+		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
 		((ses->server->tcpStatus != CifsGood) &&
 		 (ses->server->tcpStatus != CifsNew)));
 
 	/* Were we interrupted by a signal ? */
 	if ((rc == -ERESTARTSYS) &&
-		(midQ->midState == MID_REQUEST_SUBMITTED) &&
+		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
 		((ses->server->tcpStatus == CifsGood) ||
 		 (ses->server->tcpStatus == CifsNew))) {
 
@@ -894,7 +921,7 @@
 		if (rc) {
 			send_nt_cancel(ses->server, in_buf, midQ);
 			spin_lock(&GlobalMid_Lock);
-			if (midQ->midState == MID_REQUEST_SUBMITTED) {
+			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 				/* no longer considered to be "in-flight" */
 				midQ->callback = DeleteMidQEntry;
 				spin_unlock(&GlobalMid_Lock);
@@ -912,13 +939,13 @@
 		return rc;
 
 	/* rcvd frame is ok */
-	if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
+	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
 		rc = -EIO;
 		cERROR(1, "Bad MID state?");
 		goto out;
 	}
 
-	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
 	rc = cifs_check_receive(midQ, ses->server, 0);
 out:
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 21e93605..5dfafdd 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -33,18 +33,10 @@
 	return count;
 }
 
-static int default_open(struct inode *inode, struct file *file)
-{
-	if (inode->i_private)
-		file->private_data = inode->i_private;
-
-	return 0;
-}
-
 const struct file_operations debugfs_file_operations = {
 	.read =		default_read_file,
 	.write =	default_write_file,
-	.open =		default_open,
+	.open =		simple_open,
 	.llseek =	noop_llseek,
 };
 
@@ -447,7 +439,7 @@
 static const struct file_operations fops_bool = {
 	.read =		read_file_bool,
 	.write =	write_file_bool,
-	.open =		default_open,
+	.open =		simple_open,
 	.llseek =	default_llseek,
 };
 
@@ -492,7 +484,7 @@
 
 static const struct file_operations fops_blob = {
 	.read =		read_file_blob,
-	.open =		default_open,
+	.open =		simple_open,
 	.llseek =	default_llseek,
 };
 
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 3dca2b3..1c9b0809 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -609,13 +609,6 @@
 /*
  * dump lkb's on the ls_waiters list
  */
-
-static int waiters_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t waiters_read(struct file *file, char __user *userbuf,
 			    size_t count, loff_t *ppos)
 {
@@ -644,7 +637,7 @@
 
 static const struct file_operations waiters_fops = {
 	.owner   = THIS_MODULE,
-	.open    = waiters_open,
+	.open    = simple_open,
 	.read    = waiters_read,
 	.llseek  = default_llseek,
 };
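
The debugfs and dlm conversions above replace local open helpers with the shared simple_open() from fs/libfs.c. As a rough sketch (mirroring the default_open() deleted above rather than quoting the library source), the helper only propagates the inode's private data to the file so the read/write handlers can reach it through file->private_data:

/*
 * Sketch of a simple_open()-style helper: stash the data hung off the
 * inode on the struct file so the fops can find it later.
 */
int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
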
diff --git a/fs/exec.c b/fs/exec.c
index 9a1d9f0..b1fd202 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1371,7 +1371,7 @@
 	unsigned int depth = bprm->recursion_depth;
 	int try,retval;
 	struct linux_binfmt *fmt;
-	pid_t old_pid;
+	pid_t old_pid, old_vpid;
 
 	retval = security_bprm_check(bprm);
 	if (retval)
@@ -1382,8 +1382,9 @@
 		return retval;
 
 	/* Need to fetch pid before load_binary changes it */
+	old_pid = current->pid;
 	rcu_read_lock();
-	old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
+	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
 	rcu_read_unlock();
 
 	retval = -ENOENT;
@@ -1406,7 +1407,7 @@
 			if (retval >= 0) {
 				if (depth == 0) {
 					trace_sched_process_exec(current, old_pid, bprm);
-					ptrace_event(PTRACE_EVENT_EXEC, old_pid);
+					ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
 				}
 				put_binfmt(fmt);
 				allow_write_access(bprm->file);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 75ad433..0b2b4db 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -1,5 +1,636 @@
+/*
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/include/linux/minix_fs.h
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
 #include <linux/fs.h>
 #include <linux/ext2_fs.h>
+#include <linux/blockgroup_lock.h>
+#include <linux/percpu_counter.h>
+#include <linux/rbtree.h>
+
+/* XXX Here for now... not interested in restructuring headers JUST now */
+
+/* data type for block offset of block group */
+typedef int ext2_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long ext2_fsblk_t;
+
+#define E2FSBLK "%lu"
+
+struct ext2_reserve_window {
+	ext2_fsblk_t		_rsv_start;	/* First byte reserved */
+	ext2_fsblk_t		_rsv_end;	/* Last byte reserved or 0 */
+};
+
+struct ext2_reserve_window_node {
+	struct rb_node	 	rsv_node;
+	__u32			rsv_goal_size;
+	__u32			rsv_alloc_hit;
+	struct ext2_reserve_window	rsv_window;
+};
+
+struct ext2_block_alloc_info {
+	/* information about reservation window */
+	struct ext2_reserve_window_node	rsv_window_node;
+	/*
+	 * was i_next_alloc_block in ext2_inode_info
+	 * is the logical (file-relative) number of the
+	 * most-recently-allocated block in this file.
+	 * We use this for detecting linearly ascending allocation requests.
+	 */
+	__u32			last_alloc_logical_block;
+	/*
+	 * Was i_next_alloc_goal in ext2_inode_info
+	 * is the *physical* companion to i_next_alloc_block.
+	 * it is the physical block number of the block which was most recently
+	 * allocated to this file.  This gives us the goal (target) for the next
+	 * allocation when we detect linearly ascending requests.
+	 */
+	ext2_fsblk_t		last_alloc_physical_block;
+};
+
+#define rsv_start rsv_window._rsv_start
+#define rsv_end rsv_window._rsv_end
+
+/*
+ * second extended-fs super-block data in memory
+ */
+struct ext2_sb_info {
+	unsigned long s_frag_size;	/* Size of a fragment in bytes */
+	unsigned long s_frags_per_block;/* Number of fragments per block */
+	unsigned long s_inodes_per_block;/* Number of inodes per block */
+	unsigned long s_frags_per_group;/* Number of fragments in a group */
+	unsigned long s_blocks_per_group;/* Number of blocks in a group */
+	unsigned long s_inodes_per_group;/* Number of inodes in a group */
+	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
+	unsigned long s_gdb_count;	/* Number of group descriptor blocks */
+	unsigned long s_desc_per_block;	/* Number of group descriptors per block */
+	unsigned long s_groups_count;	/* Number of groups in the fs */
+	unsigned long s_overhead_last;  /* Last calculated overhead */
+	unsigned long s_blocks_last;    /* Last seen block count */
+	struct buffer_head * s_sbh;	/* Buffer containing the super block */
+	struct ext2_super_block * s_es;	/* Pointer to the super block in the buffer */
+	struct buffer_head ** s_group_desc;
+	unsigned long  s_mount_opt;
+	unsigned long s_sb_block;
+	uid_t s_resuid;
+	gid_t s_resgid;
+	unsigned short s_mount_state;
+	unsigned short s_pad;
+	int s_addr_per_block_bits;
+	int s_desc_per_block_bits;
+	int s_inode_size;
+	int s_first_ino;
+	spinlock_t s_next_gen_lock;
+	u32 s_next_generation;
+	unsigned long s_dir_count;
+	u8 *s_debts;
+	struct percpu_counter s_freeblocks_counter;
+	struct percpu_counter s_freeinodes_counter;
+	struct percpu_counter s_dirs_counter;
+	struct blockgroup_lock *s_blockgroup_lock;
+	/* root of the per fs reservation window tree */
+	spinlock_t s_rsv_window_lock;
+	struct rb_root s_rsv_window_root;
+	struct ext2_reserve_window_node s_rsv_window_head;
+	/*
+	 * s_lock protects against concurrent modifications of s_mount_state,
+	 * s_blocks_last, s_overhead_last and the content of superblock's
+	 * buffer pointed to by sbi->s_es.
+	 *
+	 * Note: It is used in ext2_show_options() to provide a consistent view
+	 * of the mount options.
+	 */
+	spinlock_t s_lock;
+};
+
+static inline spinlock_t *
+sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
+{
+	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
+}
+
+/*
+ * Define EXT2FS_DEBUG to produce debug messages
+ */
+#undef EXT2FS_DEBUG
+
+/*
+ * Define EXT2_RESERVATION to reserve data blocks for expanding files
+ */
+#define EXT2_DEFAULT_RESERVE_BLOCKS     8
+/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
+#define EXT2_MAX_RESERVE_BLOCKS         1027
+#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
+/*
+ * The second extended file system version
+ */
+#define EXT2FS_DATE		"95/08/09"
+#define EXT2FS_VERSION		"0.5b"
+
+/*
+ * Debug code
+ */
+#ifdef EXT2FS_DEBUG
+#	define ext2_debug(f, a...)	{ \
+					printk ("EXT2-fs DEBUG (%s, %d): %s:", \
+						__FILE__, __LINE__, __func__); \
+				  	printk (f, ## a); \
+					}
+#else
+#	define ext2_debug(f, a...)	/**/
+#endif
+
+/*
+ * Special inode numbers
+ */
+#define	EXT2_BAD_INO		 1	/* Bad blocks inode */
+#define EXT2_ROOT_INO		 2	/* Root inode */
+#define EXT2_BOOT_LOADER_INO	 5	/* Boot loader inode */
+#define EXT2_UNDEL_DIR_INO	 6	/* Undelete directory inode */
+
+/* First non-reserved inode for old ext2 filesystems */
+#define EXT2_GOOD_OLD_FIRST_INO	11
+
+static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+/*
+ * Macro-instructions used to manage several block sizes
+ */
+#define EXT2_MIN_BLOCK_SIZE		1024
+#define	EXT2_MAX_BLOCK_SIZE		4096
+#define EXT2_MIN_BLOCK_LOG_SIZE		  10
+#define EXT2_BLOCK_SIZE(s)		((s)->s_blocksize)
+#define	EXT2_ADDR_PER_BLOCK(s)		(EXT2_BLOCK_SIZE(s) / sizeof (__u32))
+#define EXT2_BLOCK_SIZE_BITS(s)		((s)->s_blocksize_bits)
+#define	EXT2_ADDR_PER_BLOCK_BITS(s)	(EXT2_SB(s)->s_addr_per_block_bits)
+#define EXT2_INODE_SIZE(s)		(EXT2_SB(s)->s_inode_size)
+#define EXT2_FIRST_INO(s)		(EXT2_SB(s)->s_first_ino)
+
+/*
+ * Macro-instructions used to manage fragments
+ */
+#define EXT2_MIN_FRAG_SIZE		1024
+#define	EXT2_MAX_FRAG_SIZE		4096
+#define EXT2_MIN_FRAG_LOG_SIZE		  10
+#define EXT2_FRAG_SIZE(s)		(EXT2_SB(s)->s_frag_size)
+#define EXT2_FRAGS_PER_BLOCK(s)		(EXT2_SB(s)->s_frags_per_block)
+
+/*
+ * Structure of a blocks group descriptor
+ */
+struct ext2_group_desc
+{
+	__le32	bg_block_bitmap;		/* Blocks bitmap block */
+	__le32	bg_inode_bitmap;		/* Inodes bitmap block */
+	__le32	bg_inode_table;		/* Inodes table block */
+	__le16	bg_free_blocks_count;	/* Free blocks count */
+	__le16	bg_free_inodes_count;	/* Free inodes count */
+	__le16	bg_used_dirs_count;	/* Directories count */
+	__le16	bg_pad;
+	__le32	bg_reserved[3];
+};
+
+/*
+ * Macro-instructions used to manage group descriptors
+ */
+#define EXT2_BLOCKS_PER_GROUP(s)	(EXT2_SB(s)->s_blocks_per_group)
+#define EXT2_DESC_PER_BLOCK(s)		(EXT2_SB(s)->s_desc_per_block)
+#define EXT2_INODES_PER_GROUP(s)	(EXT2_SB(s)->s_inodes_per_group)
+#define EXT2_DESC_PER_BLOCK_BITS(s)	(EXT2_SB(s)->s_desc_per_block_bits)
+
+/*
+ * Constants relative to the data blocks
+ */
+#define	EXT2_NDIR_BLOCKS		12
+#define	EXT2_IND_BLOCK			EXT2_NDIR_BLOCKS
+#define	EXT2_DIND_BLOCK			(EXT2_IND_BLOCK + 1)
+#define	EXT2_TIND_BLOCK			(EXT2_DIND_BLOCK + 1)
+#define	EXT2_N_BLOCKS			(EXT2_TIND_BLOCK + 1)
+
+/*
+ * Inode flags (GETFLAGS/SETFLAGS)
+ */
+#define	EXT2_SECRM_FL			FS_SECRM_FL	/* Secure deletion */
+#define	EXT2_UNRM_FL			FS_UNRM_FL	/* Undelete */
+#define	EXT2_COMPR_FL			FS_COMPR_FL	/* Compress file */
+#define EXT2_SYNC_FL			FS_SYNC_FL	/* Synchronous updates */
+#define EXT2_IMMUTABLE_FL		FS_IMMUTABLE_FL	/* Immutable file */
+#define EXT2_APPEND_FL			FS_APPEND_FL	/* writes to file may only append */
+#define EXT2_NODUMP_FL			FS_NODUMP_FL	/* do not dump file */
+#define EXT2_NOATIME_FL			FS_NOATIME_FL	/* do not update atime */
+/* Reserved for compression usage... */
+#define EXT2_DIRTY_FL			FS_DIRTY_FL
+#define EXT2_COMPRBLK_FL		FS_COMPRBLK_FL	/* One or more compressed clusters */
+#define EXT2_NOCOMP_FL			FS_NOCOMP_FL	/* Don't compress */
+#define EXT2_ECOMPR_FL			FS_ECOMPR_FL	/* Compression error */
+/* End compression flags --- maybe not all used */	
+#define EXT2_BTREE_FL			FS_BTREE_FL	/* btree format dir */
+#define EXT2_INDEX_FL			FS_INDEX_FL	/* hash-indexed directory */
+#define EXT2_IMAGIC_FL			FS_IMAGIC_FL	/* AFS directory */
+#define EXT2_JOURNAL_DATA_FL		FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define EXT2_NOTAIL_FL			FS_NOTAIL_FL	/* file tail should not be merged */
+#define EXT2_DIRSYNC_FL			FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
+#define EXT2_TOPDIR_FL			FS_TOPDIR_FL	/* Top of directory hierarchies*/
+#define EXT2_RESERVED_FL		FS_RESERVED_FL	/* reserved for ext2 lib */
+
+#define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
+#define EXT2_FL_USER_MODIFIABLE		FS_FL_USER_MODIFIABLE	/* User modifiable flags */
+
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
+			   EXT2_SYNC_FL | EXT2_NODUMP_FL |\
+			   EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
+			   EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
+			   EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
+{
+	if (S_ISDIR(mode))
+		return flags;
+	else if (S_ISREG(mode))
+		return flags & EXT2_REG_FLMASK;
+	else
+		return flags & EXT2_OTHER_FLMASK;
+}
+
+/*
+ * ioctl commands
+ */
+#define	EXT2_IOC_GETFLAGS		FS_IOC_GETFLAGS
+#define	EXT2_IOC_SETFLAGS		FS_IOC_SETFLAGS
+#define	EXT2_IOC_GETVERSION		FS_IOC_GETVERSION
+#define	EXT2_IOC_SETVERSION		FS_IOC_SETVERSION
+#define	EXT2_IOC_GETRSVSZ		_IOR('f', 5, long)
+#define	EXT2_IOC_SETRSVSZ		_IOW('f', 6, long)
+
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define EXT2_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
+#define EXT2_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
+#define EXT2_IOC32_GETVERSION		FS_IOC32_GETVERSION
+#define EXT2_IOC32_SETVERSION		FS_IOC32_SETVERSION
+
+/*
+ * Structure of an inode on the disk
+ */
+struct ext2_inode {
+	__le16	i_mode;		/* File mode */
+	__le16	i_uid;		/* Low 16 bits of Owner Uid */
+	__le32	i_size;		/* Size in bytes */
+	__le32	i_atime;	/* Access time */
+	__le32	i_ctime;	/* Creation time */
+	__le32	i_mtime;	/* Modification time */
+	__le32	i_dtime;	/* Deletion Time */
+	__le16	i_gid;		/* Low 16 bits of Group Id */
+	__le16	i_links_count;	/* Links count */
+	__le32	i_blocks;	/* Blocks count */
+	__le32	i_flags;	/* File flags */
+	union {
+		struct {
+			__le32  l_i_reserved1;
+		} linux1;
+		struct {
+			__le32  h_i_translator;
+		} hurd1;
+		struct {
+			__le32  m_i_reserved1;
+		} masix1;
+	} osd1;				/* OS dependent 1 */
+	__le32	i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
+	__le32	i_generation;	/* File version (for NFS) */
+	__le32	i_file_acl;	/* File ACL */
+	__le32	i_dir_acl;	/* Directory ACL */
+	__le32	i_faddr;	/* Fragment address */
+	union {
+		struct {
+			__u8	l_i_frag;	/* Fragment number */
+			__u8	l_i_fsize;	/* Fragment size */
+			__u16	i_pad1;
+			__le16	l_i_uid_high;	/* these 2 fields    */
+			__le16	l_i_gid_high;	/* were reserved2[0] */
+			__u32	l_i_reserved2;
+		} linux2;
+		struct {
+			__u8	h_i_frag;	/* Fragment number */
+			__u8	h_i_fsize;	/* Fragment size */
+			__le16	h_i_mode_high;
+			__le16	h_i_uid_high;
+			__le16	h_i_gid_high;
+			__le32	h_i_author;
+		} hurd2;
+		struct {
+			__u8	m_i_frag;	/* Fragment number */
+			__u8	m_i_fsize;	/* Fragment size */
+			__u16	m_pad1;
+			__u32	m_i_reserved2[2];
+		} masix2;
+	} osd2;				/* OS dependent 2 */
+};
+
+#define i_size_high	i_dir_acl
+
+#define i_reserved1	osd1.linux1.l_i_reserved1
+#define i_frag		osd2.linux2.l_i_frag
+#define i_fsize		osd2.linux2.l_i_fsize
+#define i_uid_low	i_uid
+#define i_gid_low	i_gid
+#define i_uid_high	osd2.linux2.l_i_uid_high
+#define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_reserved2	osd2.linux2.l_i_reserved2
+
+/*
+ * File system states
+ */
+#define	EXT2_VALID_FS			0x0001	/* Unmounted cleanly */
+#define	EXT2_ERROR_FS			0x0002	/* Errors detected */
+
+/*
+ * Mount flags
+ */
+#define EXT2_MOUNT_CHECK		0x000001  /* Do mount-time checks */
+#define EXT2_MOUNT_OLDALLOC		0x000002  /* Don't use the new Orlov allocator */
+#define EXT2_MOUNT_GRPID		0x000004  /* Create files with directory's group */
+#define EXT2_MOUNT_DEBUG		0x000008  /* Some debugging messages */
+#define EXT2_MOUNT_ERRORS_CONT		0x000010  /* Continue on errors */
+#define EXT2_MOUNT_ERRORS_RO		0x000020  /* Remount fs ro on errors */
+#define EXT2_MOUNT_ERRORS_PANIC		0x000040  /* Panic on errors */
+#define EXT2_MOUNT_MINIX_DF		0x000080  /* Mimics the Minix statfs */
+#define EXT2_MOUNT_NOBH			0x000100  /* No buffer_heads */
+#define EXT2_MOUNT_NO_UID32		0x000200  /* Disable 32-bit UIDs */
+#define EXT2_MOUNT_XATTR_USER		0x004000  /* Extended user attributes */
+#define EXT2_MOUNT_POSIX_ACL		0x008000  /* POSIX Access Control Lists */
+#define EXT2_MOUNT_XIP			0x010000  /* Execute in place */
+#define EXT2_MOUNT_USRQUOTA		0x020000  /* user quota */
+#define EXT2_MOUNT_GRPQUOTA		0x040000  /* group quota */
+#define EXT2_MOUNT_RESERVATION		0x080000  /* Preallocation */
+
+
+#define clear_opt(o, opt)		o &= ~EXT2_MOUNT_##opt
+#define set_opt(o, opt)			o |= EXT2_MOUNT_##opt
+#define test_opt(sb, opt)		(EXT2_SB(sb)->s_mount_opt & \
+					 EXT2_MOUNT_##opt)
+/*
+ * Maximal mount counts between two filesystem checks
+ */
+#define EXT2_DFL_MAX_MNT_COUNT		20	/* Allow 20 mounts */
+#define EXT2_DFL_CHECKINTERVAL		0	/* Don't use interval check */
+
+/*
+ * Behaviour when detecting errors
+ */
+#define EXT2_ERRORS_CONTINUE		1	/* Continue execution */
+#define EXT2_ERRORS_RO			2	/* Remount fs read-only */
+#define EXT2_ERRORS_PANIC		3	/* Panic */
+#define EXT2_ERRORS_DEFAULT		EXT2_ERRORS_CONTINUE
+
+/*
+ * Structure of the super block
+ */
+struct ext2_super_block {
+	__le32	s_inodes_count;		/* Inodes count */
+	__le32	s_blocks_count;		/* Blocks count */
+	__le32	s_r_blocks_count;	/* Reserved blocks count */
+	__le32	s_free_blocks_count;	/* Free blocks count */
+	__le32	s_free_inodes_count;	/* Free inodes count */
+	__le32	s_first_data_block;	/* First Data Block */
+	__le32	s_log_block_size;	/* Block size */
+	__le32	s_log_frag_size;	/* Fragment size */
+	__le32	s_blocks_per_group;	/* # Blocks per group */
+	__le32	s_frags_per_group;	/* # Fragments per group */
+	__le32	s_inodes_per_group;	/* # Inodes per group */
+	__le32	s_mtime;		/* Mount time */
+	__le32	s_wtime;		/* Write time */
+	__le16	s_mnt_count;		/* Mount count */
+	__le16	s_max_mnt_count;	/* Maximal mount count */
+	__le16	s_magic;		/* Magic signature */
+	__le16	s_state;		/* File system state */
+	__le16	s_errors;		/* Behaviour when detecting errors */
+	__le16	s_minor_rev_level; 	/* minor revision level */
+	__le32	s_lastcheck;		/* time of last check */
+	__le32	s_checkinterval;	/* max. time between checks */
+	__le32	s_creator_os;		/* OS */
+	__le32	s_rev_level;		/* Revision level */
+	__le16	s_def_resuid;		/* Default uid for reserved blocks */
+	__le16	s_def_resgid;		/* Default gid for reserved blocks */
+	/*
+	 * These fields are for EXT2_DYNAMIC_REV superblocks only.
+	 *
+	 * Note: the difference between the compatible feature set and
+	 * the incompatible feature set is that if there is a bit set
+	 * in the incompatible feature set that the kernel doesn't
+	 * know about, it should refuse to mount the filesystem.
+	 * 
+	 * e2fsck's requirements are more strict; if it doesn't know
+	 * about a feature in either the compatible or incompatible
+	 * feature set, it must abort and not try to meddle with
+	 * things it doesn't understand...
+	 */
+	__le32	s_first_ino; 		/* First non-reserved inode */
+	__le16   s_inode_size; 		/* size of inode structure */
+	__le16	s_block_group_nr; 	/* block group # of this superblock */
+	__le32	s_feature_compat; 	/* compatible feature set */
+	__le32	s_feature_incompat; 	/* incompatible feature set */
+	__le32	s_feature_ro_compat; 	/* readonly-compatible feature set */
+	__u8	s_uuid[16];		/* 128-bit uuid for volume */
+	char	s_volume_name[16]; 	/* volume name */
+	char	s_last_mounted[64]; 	/* directory where last mounted */
+	__le32	s_algorithm_usage_bitmap; /* For compression */
+	/*
+	 * Performance hints.  Directory preallocation should only
+	 * happen if the EXT2_COMPAT_PREALLOC flag is on.
+	 */
+	__u8	s_prealloc_blocks;	/* Nr of blocks to try to preallocate*/
+	__u8	s_prealloc_dir_blocks;	/* Nr to preallocate for dirs */
+	__u16	s_padding1;
+	/*
+	 * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
+	 */
+	__u8	s_journal_uuid[16];	/* uuid of journal superblock */
+	__u32	s_journal_inum;		/* inode number of journal file */
+	__u32	s_journal_dev;		/* device number of journal file */
+	__u32	s_last_orphan;		/* start of list of inodes to delete */
+	__u32	s_hash_seed[4];		/* HTREE hash seed */
+	__u8	s_def_hash_version;	/* Default hash version to use */
+	__u8	s_reserved_char_pad;
+	__u16	s_reserved_word_pad;
+	__le32	s_default_mount_opts;
+ 	__le32	s_first_meta_bg; 	/* First metablock block group */
+	__u32	s_reserved[190];	/* Padding to the end of the block */
+};
+
+/*
+ * Codes for operating systems
+ */
+#define EXT2_OS_LINUX		0
+#define EXT2_OS_HURD		1
+#define EXT2_OS_MASIX		2
+#define EXT2_OS_FREEBSD		3
+#define EXT2_OS_LITES		4
+
+/*
+ * Revision levels
+ */
+#define EXT2_GOOD_OLD_REV	0	/* The good old (original) format */
+#define EXT2_DYNAMIC_REV	1 	/* V2 format w/ dynamic inode sizes */
+
+#define EXT2_CURRENT_REV	EXT2_GOOD_OLD_REV
+#define EXT2_MAX_SUPP_REV	EXT2_DYNAMIC_REV
+
+#define EXT2_GOOD_OLD_INODE_SIZE 128
+
+/*
+ * Feature set definitions
+ */
+
+#define EXT2_HAS_COMPAT_FEATURE(sb,mask)			\
+	( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
+#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask)			\
+	( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
+#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask)			\
+	( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
+#define EXT2_SET_COMPAT_FEATURE(sb,mask)			\
+	EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
+#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask)			\
+	EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
+#define EXT2_SET_INCOMPAT_FEATURE(sb,mask)			\
+	EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
+#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask)			\
+	EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
+#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask)			\
+	EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
+#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask)			\
+	EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
+
+#define EXT2_FEATURE_COMPAT_DIR_PREALLOC	0x0001
+#define EXT2_FEATURE_COMPAT_IMAGIC_INODES	0x0002
+#define EXT3_FEATURE_COMPAT_HAS_JOURNAL		0x0004
+#define EXT2_FEATURE_COMPAT_EXT_ATTR		0x0008
+#define EXT2_FEATURE_COMPAT_RESIZE_INO		0x0010
+#define EXT2_FEATURE_COMPAT_DIR_INDEX		0x0020
+#define EXT2_FEATURE_COMPAT_ANY			0xffffffff
+
+#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
+#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
+#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
+#define EXT2_FEATURE_RO_COMPAT_ANY		0xffffffff
+
+#define EXT2_FEATURE_INCOMPAT_COMPRESSION	0x0001
+#define EXT2_FEATURE_INCOMPAT_FILETYPE		0x0002
+#define EXT3_FEATURE_INCOMPAT_RECOVER		0x0004
+#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV	0x0008
+#define EXT2_FEATURE_INCOMPAT_META_BG		0x0010
+#define EXT2_FEATURE_INCOMPAT_ANY		0xffffffff
+
+#define EXT2_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
+#define EXT2_FEATURE_INCOMPAT_SUPP	(EXT2_FEATURE_INCOMPAT_FILETYPE| \
+					 EXT2_FEATURE_INCOMPAT_META_BG)
+#define EXT2_FEATURE_RO_COMPAT_SUPP	(EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+					 EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
+					 EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
+#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED	~EXT2_FEATURE_RO_COMPAT_SUPP
+#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED	~EXT2_FEATURE_INCOMPAT_SUPP
+
+/*
+ * Default values for user and/or group using reserved blocks
+ */
+#define	EXT2_DEF_RESUID		0
+#define	EXT2_DEF_RESGID		0
+
+/*
+ * Default mount options
+ */
+#define EXT2_DEFM_DEBUG		0x0001
+#define EXT2_DEFM_BSDGROUPS	0x0002
+#define EXT2_DEFM_XATTR_USER	0x0004
+#define EXT2_DEFM_ACL		0x0008
+#define EXT2_DEFM_UID16		0x0010
+    /* Not used by ext2, but reserved for use by ext3 */
+#define EXT3_DEFM_JMODE		0x0060 
+#define EXT3_DEFM_JMODE_DATA	0x0020
+#define EXT3_DEFM_JMODE_ORDERED	0x0040
+#define EXT3_DEFM_JMODE_WBACK	0x0060
+
+/*
+ * Structure of a directory entry
+ */
+
+struct ext2_dir_entry {
+	__le32	inode;			/* Inode number */
+	__le16	rec_len;		/* Directory entry length */
+	__le16	name_len;		/* Name length */
+	char	name[];			/* File name, up to EXT2_NAME_LEN */
+};
+
+/*
+ * The new version of the directory entry.  Since EXT2 structures are
+ * stored in intel byte order, and the name_len field could never be
+ * bigger than 255 chars, it's safe to reclaim the extra byte for the
+ * file_type field.
+ */
+struct ext2_dir_entry_2 {
+	__le32	inode;			/* Inode number */
+	__le16	rec_len;		/* Directory entry length */
+	__u8	name_len;		/* Name length */
+	__u8	file_type;
+	char	name[];			/* File name, up to EXT2_NAME_LEN */
+};
+
+/*
+ * Ext2 directory file types.  Only the low 3 bits are used.  The
+ * other bits are reserved for now.
+ */
+enum {
+	EXT2_FT_UNKNOWN		= 0,
+	EXT2_FT_REG_FILE	= 1,
+	EXT2_FT_DIR		= 2,
+	EXT2_FT_CHRDEV		= 3,
+	EXT2_FT_BLKDEV		= 4,
+	EXT2_FT_FIFO		= 5,
+	EXT2_FT_SOCK		= 6,
+	EXT2_FT_SYMLINK		= 7,
+	EXT2_FT_MAX
+};
+
+/*
+ * EXT2_DIR_PAD defines the directory entries boundaries
+ *
+ * NOTE: It must be a multiple of 4
+ */
+#define EXT2_DIR_PAD		 	4
+#define EXT2_DIR_ROUND 			(EXT2_DIR_PAD - 1)
+#define EXT2_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT2_DIR_ROUND) & \
+					 ~EXT2_DIR_ROUND)
+#define EXT2_MAX_REC_LEN		((1<<16)-1)
+
+static inline void verify_offsets(void)
+{
+#define A(x,y) BUILD_BUG_ON(x != offsetof(struct ext2_super_block, y));
+	A(EXT2_SB_MAGIC_OFFSET, s_magic);
+	A(EXT2_SB_BLOCKS_OFFSET, s_blocks_count);
+	A(EXT2_SB_BSIZE_OFFSET, s_log_block_size);
+#undef A
+}
 
 /*
  * ext2 mount options
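
Among the definitions gathered into ext2.h above, EXT2_DIR_PAD, EXT2_DIR_ROUND and EXT2_DIR_REC_LEN encode the on-disk rule that a directory entry carries an 8-byte fixed header (inode, rec_len, name_len, file_type) followed by the name, rounded up to a 4-byte boundary. A minimal standalone sketch of that arithmetic (plain userspace C with hypothetical macro names):

#include <stdio.h>

/* Same rounding as EXT2_DIR_REC_LEN(): 8 header bytes plus the name,
 * rounded up to the next multiple of 4. */
#define DIR_PAD			4
#define DIR_ROUND		(DIR_PAD - 1)
#define DIR_REC_LEN(name_len)	(((name_len) + 8 + DIR_ROUND) & ~DIR_ROUND)

int main(void)
{
	/* name lengths 1, 8 and 13 need 12, 16 and 24 bytes respectively */
	printf("%d %d %d\n", DIR_REC_LEN(1), DIR_REC_LEN(8), DIR_REC_LEN(13));
	return 0;
}
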
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index be7a8d0..cfedb2c 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -3,10 +3,7 @@
  * Handler for storing security labels as extended attributes.
  */
 
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext2_fs.h>
+#include "ext2.h"
 #include <linux/security.h>
 #include "xattr.h"
 
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 2989467..7e19257 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -5,10 +5,7 @@
  * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
  */
 
-#include <linux/string.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext2_fs.h>
+#include "ext2.h"
 #include "xattr.h"
 
 static size_t
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index 322a56b..1c33128 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -9,8 +9,6 @@
 #include <linux/fs.h>
 #include <linux/genhd.h>
 #include <linux/buffer_head.h>
-#include <linux/ext2_fs_sb.h>
-#include <linux/ext2_fs.h>
 #include <linux/blkdev.h>
 #include "ext2.h"
 #include "xip.h"
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 3091f62..c76832c 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -4,13 +4,7 @@
  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
  */
 
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 1e036b7..baac1b1 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -11,17 +11,9 @@
  *        David S. Miller (davem@caip.rutgers.edu), 1995
  */
 
-#include <linux/time.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
 #include <linux/blkdev.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
 
 /*
  * balloc.c contains the blocks allocation and deallocation routines
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index 6afc39d..909d13e 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -7,9 +7,7 @@
  * Universite Pierre et Marie Curie (Paris VI)
  */
 
-#include <linux/buffer_head.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 
 #ifdef EXT3FS_DEBUG
 
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 34f0a07..cc761ad8 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -21,12 +21,7 @@
  *
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/buffer_head.h>
-#include <linux/slab.h>
-#include <linux/rbtree.h>
+#include "ext3.h"
 
 static unsigned char ext3_filetype_table[] = {
 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
diff --git a/include/linux/ext3_fs.h b/fs/ext3/ext3.h
similarity index 67%
rename from include/linux/ext3_fs.h
rename to fs/ext3/ext3.h
index f5a84eef..b6515fd 100644
--- a/include/linux/ext3_fs.h
+++ b/fs/ext3/ext3.h
@@ -1,5 +1,11 @@
 /*
- *  linux/include/linux/ext3_fs.h
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
  *
  * Copyright (C) 1992, 1993, 1994, 1995
  * Remy Card (card@masi.ibp.fr)
@@ -13,12 +19,11 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
-#ifndef _LINUX_EXT3_FS_H
-#define _LINUX_EXT3_FS_H
-
-#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
 #include <linux/magic.h>
 #include <linux/bug.h>
+#include <linux/blockgroup_lock.h>
 
 /*
  * The second extended filesystem constants/structures
@@ -75,29 +80,12 @@
 #define EXT3_MIN_BLOCK_SIZE		1024
 #define	EXT3_MAX_BLOCK_SIZE		65536
 #define EXT3_MIN_BLOCK_LOG_SIZE		10
-#ifdef __KERNEL__
-# define EXT3_BLOCK_SIZE(s)		((s)->s_blocksize)
-#else
-# define EXT3_BLOCK_SIZE(s)		(EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
-#endif
+#define EXT3_BLOCK_SIZE(s)		((s)->s_blocksize)
 #define	EXT3_ADDR_PER_BLOCK(s)		(EXT3_BLOCK_SIZE(s) / sizeof (__u32))
-#ifdef __KERNEL__
-# define EXT3_BLOCK_SIZE_BITS(s)	((s)->s_blocksize_bits)
-#else
-# define EXT3_BLOCK_SIZE_BITS(s)	((s)->s_log_block_size + 10)
-#endif
-#ifdef __KERNEL__
+#define EXT3_BLOCK_SIZE_BITS(s)	((s)->s_blocksize_bits)
 #define	EXT3_ADDR_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_addr_per_block_bits)
 #define EXT3_INODE_SIZE(s)		(EXT3_SB(s)->s_inode_size)
 #define EXT3_FIRST_INO(s)		(EXT3_SB(s)->s_first_ino)
-#else
-#define EXT3_INODE_SIZE(s)	(((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
-				 EXT3_GOOD_OLD_INODE_SIZE : \
-				 (s)->s_inode_size)
-#define EXT3_FIRST_INO(s)	(((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
-				 EXT3_GOOD_OLD_FIRST_INO : \
-				 (s)->s_first_ino)
-#endif
 
 /*
  * Macro-instructions used to manage fragments
@@ -105,13 +93,8 @@
 #define EXT3_MIN_FRAG_SIZE		1024
 #define	EXT3_MAX_FRAG_SIZE		4096
 #define EXT3_MIN_FRAG_LOG_SIZE		  10
-#ifdef __KERNEL__
-# define EXT3_FRAG_SIZE(s)		(EXT3_SB(s)->s_frag_size)
-# define EXT3_FRAGS_PER_BLOCK(s)	(EXT3_SB(s)->s_frags_per_block)
-#else
-# define EXT3_FRAG_SIZE(s)		(EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT3_FRAGS_PER_BLOCK(s)	(EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
-#endif
+#define EXT3_FRAG_SIZE(s)		(EXT3_SB(s)->s_frag_size)
+#define EXT3_FRAGS_PER_BLOCK(s)		(EXT3_SB(s)->s_frags_per_block)
 
 /*
  * Structure of a blocks group descriptor
@@ -131,16 +114,10 @@
 /*
  * Macro-instructions used to manage group descriptors
  */
-#ifdef __KERNEL__
-# define EXT3_BLOCKS_PER_GROUP(s)	(EXT3_SB(s)->s_blocks_per_group)
-# define EXT3_DESC_PER_BLOCK(s)		(EXT3_SB(s)->s_desc_per_block)
-# define EXT3_INODES_PER_GROUP(s)	(EXT3_SB(s)->s_inodes_per_group)
-# define EXT3_DESC_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_desc_per_block_bits)
-#else
-# define EXT3_BLOCKS_PER_GROUP(s)	((s)->s_blocks_per_group)
-# define EXT3_DESC_PER_BLOCK(s)		(EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
-# define EXT3_INODES_PER_GROUP(s)	((s)->s_inodes_per_group)
-#endif
+#define EXT3_BLOCKS_PER_GROUP(s)	(EXT3_SB(s)->s_blocks_per_group)
+#define EXT3_DESC_PER_BLOCK(s)		(EXT3_SB(s)->s_desc_per_block)
+#define EXT3_INODES_PER_GROUP(s)	(EXT3_SB(s)->s_inodes_per_group)
+#define EXT3_DESC_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_desc_per_block_bits)
 
 /*
  * Constants relative to the data blocks
@@ -336,7 +313,6 @@
 
 #define i_size_high	i_dir_acl
 
-#if defined(__KERNEL__) || defined(__linux__)
 #define i_reserved1	osd1.linux1.l_i_reserved1
 #define i_frag		osd2.linux2.l_i_frag
 #define i_fsize		osd2.linux2.l_i_fsize
@@ -346,24 +322,6 @@
 #define i_gid_high	osd2.linux2.l_i_gid_high
 #define i_reserved2	osd2.linux2.l_i_reserved2
 
-#elif defined(__GNU__)
-
-#define i_translator	osd1.hurd1.h_i_translator
-#define i_frag		osd2.hurd2.h_i_frag;
-#define i_fsize		osd2.hurd2.h_i_fsize;
-#define i_uid_high	osd2.hurd2.h_i_uid_high
-#define i_gid_high	osd2.hurd2.h_i_gid_high
-#define i_author	osd2.hurd2.h_i_author
-
-#elif defined(__masix__)
-
-#define i_reserved1	osd1.masix1.m_i_reserved1
-#define i_frag		osd2.masix2.m_i_frag
-#define i_fsize		osd2.masix2.m_i_fsize
-#define i_reserved2	osd2.masix2.m_i_reserved2
-
-#endif /* defined(__KERNEL__) || defined(__linux__) */
-
 /*
  * File system states
  */
@@ -531,9 +489,197 @@
 	__u32   s_reserved[162];        /* Padding to the end of the block */
 };
 
-#ifdef __KERNEL__
-#include <linux/ext3_fs_i.h>
-#include <linux/ext3_fs_sb.h>
+/* data type for block offset of block group */
+typedef int ext3_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long ext3_fsblk_t;
+
+#define E3FSBLK "%lu"
+
+struct ext3_reserve_window {
+	ext3_fsblk_t	_rsv_start;	/* First byte reserved */
+	ext3_fsblk_t	_rsv_end;	/* Last byte reserved or 0 */
+};
+
+struct ext3_reserve_window_node {
+	struct rb_node		rsv_node;
+	__u32			rsv_goal_size;
+	__u32			rsv_alloc_hit;
+	struct ext3_reserve_window	rsv_window;
+};
+
+struct ext3_block_alloc_info {
+	/* information about reservation window */
+	struct ext3_reserve_window_node	rsv_window_node;
+	/*
+	 * was i_next_alloc_block in ext3_inode_info
+	 * is the logical (file-relative) number of the
+	 * most-recently-allocated block in this file.
+	 * We use this for detecting linearly ascending allocation requests.
+	 */
+	__u32                   last_alloc_logical_block;
+	/*
+	 * Was i_next_alloc_goal in ext3_inode_info
+	 * is the *physical* companion to i_next_alloc_block.
+	 * it is the physical block number of the block which was most recently
+	 * allocated to this file.  This gives us the goal (target) for the next
+	 * allocation when we detect linearly ascending requests.
+	 */
+	ext3_fsblk_t		last_alloc_physical_block;
+};
+
+#define rsv_start rsv_window._rsv_start
+#define rsv_end rsv_window._rsv_end
+
+/*
+ * third extended file system inode data in memory
+ */
+struct ext3_inode_info {
+	__le32	i_data[15];	/* unconverted */
+	__u32	i_flags;
+#ifdef EXT3_FRAGMENTS
+	__u32	i_faddr;
+	__u8	i_frag_no;
+	__u8	i_frag_size;
+#endif
+	ext3_fsblk_t	i_file_acl;
+	__u32	i_dir_acl;
+	__u32	i_dtime;
+
+	/*
+	 * i_block_group is the number of the block group which contains
+	 * this file's inode.  Constant across the lifetime of the inode,
+	 * it is used for making block allocation decisions - we try to
+	 * place a file's data blocks near its inode block, and new inodes
+	 * near to their parent directory's inode.
+	 */
+	__u32	i_block_group;
+	unsigned long	i_state_flags;	/* Dynamic state flags for ext3 */
+
+	/* block reservation info */
+	struct ext3_block_alloc_info *i_block_alloc_info;
+
+	__u32	i_dir_start_lookup;
+#ifdef CONFIG_EXT3_FS_XATTR
+	/*
+	 * Extended attributes can be read independently of the main file
+	 * data. Taking i_mutex even when reading would cause contention
+	 * between readers of EAs and writers of regular file data, so
+	 * instead we synchronize on xattr_sem when reading or changing
+	 * EAs.
+	 */
+	struct rw_semaphore xattr_sem;
+#endif
+
+	struct list_head i_orphan;	/* unlinked but open inodes */
+
+	/*
+	 * i_disksize keeps track of what the inode size is ON DISK, not
+	 * in memory.  During truncate, i_size is set to the new size by
+	 * the VFS prior to calling ext3_truncate(), but the filesystem won't
+	 * set i_disksize to 0 until the truncate is actually under way.
+	 *
+	 * The intent is that i_disksize always represents the blocks which
+	 * are used by this file.  This allows recovery to restart truncate
+	 * on orphans if we crash during truncate.  We actually write i_disksize
+	 * into the on-disk inode when writing inodes out, instead of i_size.
+	 *
+	 * The only time when i_disksize and i_size may be different is when
+	 * a truncate is in progress.  The only things which change i_disksize
+	 * are ext3_get_block (growth) and ext3_truncate (shrinkth).
+	 */
+	loff_t	i_disksize;
+
+	/* on-disk additional length */
+	__u16 i_extra_isize;
+
+	/*
+	 * truncate_mutex is for serialising ext3_truncate() against
+	 * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
+	 * data tree are chopped off during truncate. We can't do that in
+	 * ext3 because whenever we perform intermediate commits during
+	 * truncate, the inode and all the metadata blocks *must* be in a
+	 * consistent state which allows truncation of the orphans to restart
+	 * during recovery.  Hence we must fix the get_block-vs-truncate race
+	 * by other means, so we have truncate_mutex.
+	 */
+	struct mutex truncate_mutex;
+
+	/*
+	 * Transactions that contain inode's metadata needed to complete
+	 * fsync and fdatasync, respectively.
+	 */
+	atomic_t i_sync_tid;
+	atomic_t i_datasync_tid;
+
+	struct inode vfs_inode;
+};
+
+/*
+ * third extended-fs super-block data in memory
+ */
+struct ext3_sb_info {
+	unsigned long s_frag_size;	/* Size of a fragment in bytes */
+	unsigned long s_frags_per_block;/* Number of fragments per block */
+	unsigned long s_inodes_per_block;/* Number of inodes per block */
+	unsigned long s_frags_per_group;/* Number of fragments in a group */
+	unsigned long s_blocks_per_group;/* Number of blocks in a group */
+	unsigned long s_inodes_per_group;/* Number of inodes in a group */
+	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
+	unsigned long s_gdb_count;	/* Number of group descriptor blocks */
+	unsigned long s_desc_per_block;	/* Number of group descriptors per block */
+	unsigned long s_groups_count;	/* Number of groups in the fs */
+	unsigned long s_overhead_last;  /* Last calculated overhead */
+	unsigned long s_blocks_last;    /* Last seen block count */
+	struct buffer_head * s_sbh;	/* Buffer containing the super block */
+	struct ext3_super_block * s_es;	/* Pointer to the super block in the buffer */
+	struct buffer_head ** s_group_desc;
+	unsigned long  s_mount_opt;
+	ext3_fsblk_t s_sb_block;
+	uid_t s_resuid;
+	gid_t s_resgid;
+	unsigned short s_mount_state;
+	unsigned short s_pad;
+	int s_addr_per_block_bits;
+	int s_desc_per_block_bits;
+	int s_inode_size;
+	int s_first_ino;
+	spinlock_t s_next_gen_lock;
+	u32 s_next_generation;
+	u32 s_hash_seed[4];
+	int s_def_hash_version;
+	int s_hash_unsigned;	/* 3 if hash should be signed, 0 if not */
+	struct percpu_counter s_freeblocks_counter;
+	struct percpu_counter s_freeinodes_counter;
+	struct percpu_counter s_dirs_counter;
+	struct blockgroup_lock *s_blockgroup_lock;
+
+	/* root of the per fs reservation window tree */
+	spinlock_t s_rsv_window_lock;
+	struct rb_root s_rsv_window_root;
+	struct ext3_reserve_window_node s_rsv_window_head;
+
+	/* Journaling */
+	struct inode * s_journal_inode;
+	struct journal_s * s_journal;
+	struct list_head s_orphan;
+	struct mutex s_orphan_lock;
+	struct mutex s_resize_lock;
+	unsigned long s_commit_interval;
+	struct block_device *journal_bdev;
+#ifdef CONFIG_QUOTA
+	char *s_qf_names[MAXQUOTAS];		/* Names of quota files with journalled quota */
+	int s_jquota_fmt;			/* Format of quota to use */
+#endif
+};
+
+static inline spinlock_t *
+sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
+{
+	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
+}
+
 static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
 {
 	return sb->s_fs_info;
@@ -576,12 +722,6 @@
 {
 	clear_bit(bit, &EXT3_I(inode)->i_state_flags);
 }
-#else
-/* Assume that user mode programs are passing in an ext3fs superblock, not
- * a kernel struct super_block.  This will allow us to call the feature-test
- * macros from user land. */
-#define EXT3_SB(sb)	(sb)
-#endif
 
 #define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
 
@@ -771,8 +911,6 @@
 #define DX_HASH_HALF_MD4_UNSIGNED	4
 #define DX_HASH_TEA_UNSIGNED		5
 
-#ifdef __KERNEL__
-
 /* hash info structure used by the directory hash */
 struct dx_hash_info
 {
@@ -974,7 +1112,211 @@
 extern const struct inode_operations ext3_symlink_inode_operations;
 extern const struct inode_operations ext3_fast_symlink_inode_operations;
 
+#define EXT3_JOURNAL(inode)	(EXT3_SB((inode)->i_sb)->s_journal)
 
-#endif	/* __KERNEL__ */
+/* Define the number of blocks we need to account to a transaction to
+ * modify one block of data.
+ *
+ * We may have to touch one inode, one bitmap buffer, up to three
+ * indirection blocks, the group and superblock summaries, and the data
+ * block to complete the transaction.  */
 
-#endif	/* _LINUX_EXT3_FS_H */
+#define EXT3_SINGLEDATA_TRANS_BLOCKS	8U
+
+/* Extended attribute operations touch at most two data buffers,
+ * two bitmap buffers, and two group summaries, in addition to the inode
+ * and the superblock, which are already accounted for. */
+
+#define EXT3_XATTR_TRANS_BLOCKS		6U
+
+/* Define the minimum size for a transaction which modifies data.  This
+ * needs to take into account the fact that we may end up modifying two
+ * quota files too (one for the group, one for the user quota).  The
+ * superblock only gets updated once, of course, so don't bother
+ * counting that again for the quota updates. */
+
+#define EXT3_DATA_TRANS_BLOCKS(sb)	(EXT3_SINGLEDATA_TRANS_BLOCKS + \
+					 EXT3_XATTR_TRANS_BLOCKS - 2 + \
+					 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
+
+/* Delete operations potentially hit one directory's namespace plus an
+ * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
+ * generous.  We can grow the delete transaction later if necessary. */
+
+#define EXT3_DELETE_TRANS_BLOCKS(sb)   (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
+
+/* Define an arbitrary limit for the amount of data we will anticipate
+ * writing to any given transaction.  For unbounded transactions such as
+ * write(2) and truncate(2) we can write more than this, but we always
+ * start off at the maximum transaction size and grow the transaction
+ * optimistically as we go. */
+
+#define EXT3_MAX_TRANS_DATA		64U
+
+/* We break up a large truncate or write transaction once the handle's
+ * buffer credits get this low; we then need either to extend the
+ * transaction or to start a new one.  Reserve enough space here for
+ * inode, bitmap, superblock, group and indirection updates for at least
+ * one block, plus two quota updates.  Quota allocations are not
+ * needed. */
+
+#define EXT3_RESERVE_TRANS_BLOCKS	12U
+
+#define EXT3_INDEX_EXTRA_TRANS_BLOCKS	8
+
+#ifdef CONFIG_QUOTA
+/* Amount of blocks needed for quota update - we know that the structure was
+ * allocated so we need to update only inode+data */
+#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+/* Amount of blocks needed for quota insert/delete - we do some block writes
+ * but inode, sb and group updates are done only once */
+#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
+		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
+#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
+		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
+#else
+#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
+#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
+#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
+#endif
+#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
+
+int
+ext3_mark_iloc_dirty(handle_t *handle,
+		     struct inode *inode,
+		     struct ext3_iloc *iloc);
+
+/*
+ * On success, we end up with an outstanding reference count against
+ * iloc->bh.  This _must_ be cleaned up later.
+ */
+
+int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
+			struct ext3_iloc *iloc);
+
+int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
+
+/*
+ * Wrapper functions with which ext3 calls into JBD.  The intent here is
+ * to allow these to be turned into appropriate stubs so ext3 can control
+ * ext2 filesystems, so ext2+ext3 systems only need one fs.  This work hasn't
+ * been done yet.
+ */
+
+static inline void ext3_journal_release_buffer(handle_t *handle,
+						struct buffer_head *bh)
+{
+	journal_release_buffer(handle, bh);
+}
+
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+		struct buffer_head *bh, handle_t *handle, int err);
+
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
+
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
+
+int __ext3_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh);
+
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+				unsigned long blocknr, struct buffer_head *bh);
+
+int __ext3_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh);
+
+int __ext3_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh);
+
+#define ext3_journal_get_undo_access(handle, bh) \
+	__ext3_journal_get_undo_access(__func__, (handle), (bh))
+#define ext3_journal_get_write_access(handle, bh) \
+	__ext3_journal_get_write_access(__func__, (handle), (bh))
+#define ext3_journal_revoke(handle, blocknr, bh) \
+	__ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
+#define ext3_journal_get_create_access(handle, bh) \
+	__ext3_journal_get_create_access(__func__, (handle), (bh))
+#define ext3_journal_dirty_metadata(handle, bh) \
+	__ext3_journal_dirty_metadata(__func__, (handle), (bh))
+#define ext3_journal_forget(handle, bh) \
+	__ext3_journal_forget(__func__, (handle), (bh))
+
+int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
+
+handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
+int __ext3_journal_stop(const char *where, handle_t *handle);
+
+static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
+{
+	return ext3_journal_start_sb(inode->i_sb, nblocks);
+}
+
+#define ext3_journal_stop(handle) \
+	__ext3_journal_stop(__func__, (handle))
+
+static inline handle_t *ext3_journal_current_handle(void)
+{
+	return journal_current_handle();
+}
+
+static inline int ext3_journal_extend(handle_t *handle, int nblocks)
+{
+	return journal_extend(handle, nblocks);
+}
+
+static inline int ext3_journal_restart(handle_t *handle, int nblocks)
+{
+	return journal_restart(handle, nblocks);
+}
+
+static inline int ext3_journal_blocks_per_page(struct inode *inode)
+{
+	return journal_blocks_per_page(inode);
+}
+
+static inline int ext3_journal_force_commit(journal_t *journal)
+{
+	return journal_force_commit(journal);
+}
+
+/* super.c */
+int ext3_force_commit(struct super_block *sb);
+
+static inline int ext3_should_journal_data(struct inode *inode)
+{
+	if (!S_ISREG(inode->i_mode))
+		return 1;
+	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
+		return 1;
+	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+		return 1;
+	return 0;
+}
+
+static inline int ext3_should_order_data(struct inode *inode)
+{
+	if (!S_ISREG(inode->i_mode))
+		return 0;
+	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+		return 0;
+	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
+		return 1;
+	return 0;
+}
+
+static inline int ext3_should_writeback_data(struct inode *inode)
+{
+	if (!S_ISREG(inode->i_mode))
+		return 0;
+	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+		return 0;
+	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
+		return 1;
+	return 0;
+}
+
+#include <trace/events/ext3.h>
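
The journaling credit macros now carried in ext3.h are plain sums; with quota compiled out, for example, EXT3_DATA_TRANS_BLOCKS works out to 8 + 6 - 2 + 0 = 12 journal blocks per modified data block and EXT3_DELETE_TRANS_BLOCKS to 64. A standalone sketch of the arithmetic, assuming the quota terms are zero (CONFIG_QUOTA off) and using shortened hypothetical names:

#include <stdio.h>

/* Mirrors the ext3 credit macros above with the quota contribution
 * assumed to be zero (CONFIG_QUOTA disabled). */
#define SINGLEDATA_TRANS_BLOCKS	8U
#define XATTR_TRANS_BLOCKS	6U
#define MAXQUOTAS_TRANS_BLOCKS	0U	/* assumption: no quota */
#define DATA_TRANS_BLOCKS	(SINGLEDATA_TRANS_BLOCKS + \
				 XATTR_TRANS_BLOCKS - 2 + \
				 MAXQUOTAS_TRANS_BLOCKS)
#define DELETE_TRANS_BLOCKS	(MAXQUOTAS_TRANS_BLOCKS + 64)

int main(void)
{
	printf("data:   %u blocks\n", DATA_TRANS_BLOCKS);	/* 12 */
	printf("delete: %u blocks\n", DELETE_TRANS_BLOCKS);	/* 64 */
	return 0;
}
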
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
index d401f14..785a326 100644
--- a/fs/ext3/ext3_jbd.c
+++ b/fs/ext3/ext3_jbd.c
@@ -2,7 +2,7 @@
  * Interface between ext3 and JBD
  */
 
-#include <linux/ext3_jbd.h>
+#include "ext3.h"
 
 int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
 				struct buffer_head *bh)
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 724df69..25cb413 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -18,12 +18,8 @@
  *	(jj@sunsite.ms.mff.cuni.cz)
  */
 
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
 #include <linux/quotaops.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 1860ed3..d4dff27 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -22,15 +22,9 @@
  * we can depend on generic_block_fdatasync() to sync the data blocks.
  */
 
-#include <linux/time.h>
 #include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
 #include <linux/writeback.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
 
 /*
  * akpm: A new design for ext3_sync_file().
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index 7d215b4..d10231d 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -9,9 +9,7 @@
  * License.
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include <linux/cryptohash.h>
 
 #define DELTA 0x9E3779B9
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 1cde284..e3c39e4 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -12,21 +12,10 @@
  *        David S. Miller (davem@caip.rutgers.edu), 1995
  */
 
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/stat.h>
-#include <linux/string.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
 #include <linux/random.h>
-#include <linux/bitops.h>
-#include <trace/events/ext3.h>
 
-#include <asm/byteorder.h>
-
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 6d34186..10d7812 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -22,22 +22,12 @@
  *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  */
 
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/ext3_jbd.h>
-#include <linux/jbd.h>
 #include <linux/highuid.h>
-#include <linux/pagemap.h>
 #include <linux/quotaops.h>
-#include <linux/string.h>
-#include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/mpage.h>
-#include <linux/uio.h>
-#include <linux/bio.h>
-#include <linux/fiemap.h>
 #include <linux/namei.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 4af574c..677a5c2 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -7,15 +7,10 @@
  * Universite Pierre et Marie Curie (Paris VI)
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/capability.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
 #include <linux/mount.h>
-#include <linux/time.h>
 #include <linux/compat.h>
 #include <asm/uaccess.h>
+#include "ext3.h"
 
 long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index e8e2117..d7940b2 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -24,20 +24,8 @@
  *	Theodore Ts'o, 2002
  */
 
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include <linux/jbd.h>
-#include <linux/time.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/fcntl.h>
-#include <linux/stat.h>
-#include <linux/string.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
-#include <linux/bio.h>
-#include <trace/events/ext3.h>
-
+#include "ext3.h"
 #include "namei.h"
 #include "xattr.h"
 #include "acl.h"
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 7916e4ce..0f814f3 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -11,10 +11,7 @@
 
 #define EXT3FS_DEBUG
 
-#include <linux/ext3_jbd.h>
-
-#include <linux/errno.h>
-#include <linux/slab.h>
+#include "ext3.h"
 
 
 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index e0b45b9..cf0b592 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -17,22 +17,12 @@
  */
 
 #include <linux/module.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/parser.h>
-#include <linux/buffer_head.h>
 #include <linux/exportfs.h>
-#include <linux/vfs.h>
+#include <linux/statfs.h>
 #include <linux/random.h>
 #include <linux/mount.h>
-#include <linux/namei.h>
 #include <linux/quotaops.h>
 #include <linux/seq_file.h>
 #include <linux/log2.h>
@@ -40,13 +30,13 @@
 
 #include <asm/uaccess.h>
 
+#define CREATE_TRACE_POINTS
+
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 #include "namei.h"
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/ext3.h>
-
 #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED
   #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA
 #else
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
index 7c48982..6b01c3e 100644
--- a/fs/ext3/symlink.c
+++ b/fs/ext3/symlink.c
@@ -17,10 +17,8 @@
  *  ext3 symlink handling code
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
 #include <linux/namei.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index d565759..d22ebb7 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -50,14 +50,9 @@
  * by the buffer lock.
  */
 
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include <linux/mbcache.h>
 #include <linux/quotaops.h>
-#include <linux/rwsem.h>
 #include "xattr.h"
 #include "acl.h"
 
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index ea26f2a..3387664 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -3,12 +3,8 @@
  * Handler for storing security labels as extended attributes.
  */
 
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
 #include <linux/security.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static size_t
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index 2526a88..d75727c 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -5,11 +5,7 @@
  * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
  */
 
-#include <linux/string.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static size_t
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index b32e473..5612af3 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -5,10 +5,7 @@
  * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
  */
 
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static size_t
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index c465ae0..eb08c9e 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,10 +1,6 @@
 config GFS2_FS
 	tristate "GFS2 file system support"
 	depends on (64BIT || LBDAF)
-	select DLM if GFS2_FS_LOCKING_DLM
-	select CONFIGFS_FS if GFS2_FS_LOCKING_DLM
-	select SYSFS if GFS2_FS_LOCKING_DLM
-	select IP_SCTP if DLM_SCTP
 	select FS_POSIX_ACL
 	select CRC32
 	select QUOTACTL
@@ -29,7 +25,8 @@
 
 config GFS2_FS_LOCKING_DLM
 	bool "GFS2 DLM locking"
-	depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && HOTPLUG
+	depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \
+		HOTPLUG && DLM && CONFIGFS_FS && SYSFS
 	help
 	  Multiple node locking module for GFS2
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 38b7a74..9b2ff0e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -807,7 +807,7 @@
 
 	if (inode == sdp->sd_rindex) {
 		adjust_fs_space(inode);
-		ip->i_gh.gh_flags |= GL_NOCACHE;
+		sdp->sd_rindex_uptodate = 0;
 	}
 
 	brelse(dibh);
@@ -873,7 +873,7 @@
 
 	if (inode == sdp->sd_rindex) {
 		adjust_fs_space(inode);
-		ip->i_gh.gh_flags |= GL_NOCACHE;
+		sdp->sd_rindex_uptodate = 0;
 	}
 
 	brelse(dibh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 197c5c4..03c04fe 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -724,7 +724,11 @@
 	int metadata;
 	unsigned int revokes = 0;
 	int x;
-	int error = 0;
+	int error;
+
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
 
 	if (!*top)
 		sm->sm_first = 0;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c35573a..a836056 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1844,6 +1844,10 @@
 	unsigned int x, size = len * sizeof(u64);
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
 	ht = kzalloc(size, GFP_NOFS);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 7683458..a3d2c9e 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -18,7 +18,6 @@
 #include <linux/mount.h>
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
-#include <linux/ext2_fs.h>
 #include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/crc32.h>
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index c98a60e..a9ba244 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1031,7 +1031,13 @@
 	struct buffer_head *bh;
 	struct gfs2_holder ghs[3];
 	struct gfs2_rgrpd *rgd;
-	int error = -EROFS;
+	int error;
+
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
+	error = -EROFS;
 
 	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
 	gfs2_holder_init(ip->i_gl,  LM_ST_EXCLUSIVE, 0, ghs + 1);
@@ -1224,6 +1230,10 @@
 			return 0;
 	}
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	if (odip != ndip) {
 		error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
 					   0, &r_gh);
@@ -1345,7 +1355,6 @@
 	error = alloc_required;
 	if (error < 0)
 		goto out_gunlock;
-	error = 0;
 
 	if (alloc_required) {
 		struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 19bde40..3df65c9 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -332,9 +332,6 @@
 	struct rb_node *n, *next;
 	struct gfs2_rgrpd *cur;
 
-	if (gfs2_rindex_update(sdp))
-		return NULL;
-
 	spin_lock(&sdp->sd_rindex_spin);
 	n = sdp->sd_rindex_tree.rb_node;
 	while (n) {
@@ -640,6 +637,7 @@
 		return 0;
 
 	error = 0; /* someone else read in the rgrp; free it and ignore it */
+	gfs2_glock_put(rgd->rd_gl);
 
 fail:
 	kfree(rgd->rd_bits);
@@ -927,6 +925,10 @@
 	} else if (copy_from_user(&r, argp, sizeof(r)))
 		return -EFAULT;
 
+	ret = gfs2_rindex_update(sdp);
+	if (ret)
+		return ret;
+
 	rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
 	rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
 
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 2e5ba42..927f4df 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -238,6 +238,10 @@
 	unsigned int x;
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	if (GFS2_EA_IS_STUFFED(ea))
 		return 0;
 
@@ -1330,6 +1334,10 @@
 	unsigned int x;
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
 	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
@@ -1439,6 +1447,10 @@
 	struct gfs2_holder gh;
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
 	if (!rgd) {
 		gfs2_consist_inode(ip);
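The gfs2 hunks above (bmap.c, dir.c, inode.c, rgrp.c, xattr.c) move the gfs2_rindex_update() call out of gfs2_blk2rgrpd() and into its callers, so each path that needs a resource group refreshes the rindex first. A minimal sketch of the resulting caller pattern; the wrapper function and its -EIO fallback are hypothetical, only the two gfs2 calls come from the patch:

/* Hypothetical caller showing the pattern introduced above:
 * refresh the resource group index, then look the rgrp up. */
static int example_get_rgrp(struct gfs2_sbd *sdp, u64 block,
			    struct gfs2_rgrpd **rgdp)
{
	int error;

	error = gfs2_rindex_update(sdp);	/* may re-read the rindex inode */
	if (error)
		return error;

	*rgdp = gfs2_blk2rgrpd(sdp, block, 0);	/* no longer updates rindex itself */
	return *rgdp ? 0 : -EIO;
}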
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ea25174..28cf06e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1031,7 +1031,6 @@
 	}
 
 	error = PTR_ERR(vfsmount);
-	unregister_filesystem(&hugetlbfs_fs_type);
 
  out:
 	kmem_cache_destroy(hugetlbfs_inode_cachep);
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 926d020..922f146 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
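Each JFFS2 file converted in this series gains a pr_fmt() definition before its first include, so the pr_warn()/pr_err()/pr_notice() calls that replace raw printk()s are automatically prefixed with the module name. A small standalone sketch of the mechanism; the message text and example value are illustrative:

/* pr_fmt() must be defined before linux/printk.h is pulled in; the
 * pr_* helpers expand to printk(<level> pr_fmt(fmt), ...), so every
 * message from this file comes out prefixed with "jffs2: ". */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static void example_warn(void)
{
	pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", -12L);
	/* expected output: "jffs2: fork failed for JFFS2 garbage collect thread: -12" */
}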
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 404111b..2b60ce1 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/jffs2.h>
 #include <linux/mtd/mtd.h>
@@ -42,12 +44,13 @@
 
 	tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
 	if (IS_ERR(tsk)) {
-		printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk));
+		pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n",
+			-PTR_ERR(tsk));
 		complete(&c->gc_thread_exit);
 		ret = PTR_ERR(tsk);
 	} else {
 		/* Wait for it... */
-		D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid));
+		jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid);
 		wait_for_completion(&c->gc_thread_start);
 		ret = tsk->pid;
 	}
@@ -60,7 +63,7 @@
 	int wait = 0;
 	spin_lock(&c->erase_completion_lock);
 	if (c->gc_task) {
-		D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid));
+		jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid);
 		send_sig(SIGKILL, c->gc_task, 1);
 		wait = 1;
 	}
@@ -90,7 +93,7 @@
 		if (!jffs2_thread_should_wake(c)) {
 			set_current_state (TASK_INTERRUPTIBLE);
 			spin_unlock(&c->erase_completion_lock);
-			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
+			jffs2_dbg(1, "%s(): sleeping...\n", __func__);
 			schedule();
 		} else
 			spin_unlock(&c->erase_completion_lock);
@@ -109,7 +112,7 @@
 		schedule_timeout_interruptible(msecs_to_jiffies(50));
 
 		if (kthread_should_stop()) {
-			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread():  kthread_stop() called.\n"));
+			jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
 			goto die;
 		}
 
@@ -126,28 +129,32 @@
 
 			switch(signr) {
 			case SIGSTOP:
-				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
+				jffs2_dbg(1, "%s(): SIGSTOP received\n",
+					  __func__);
 				set_current_state(TASK_STOPPED);
 				schedule();
 				break;
 
 			case SIGKILL:
-				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
+				jffs2_dbg(1, "%s(): SIGKILL received\n",
+					  __func__);
 				goto die;
 
 			case SIGHUP:
-				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
+				jffs2_dbg(1, "%s(): SIGHUP received\n",
+					  __func__);
 				break;
 			default:
-				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
+				jffs2_dbg(1, "%s(): signal %ld received\n",
+					  __func__, signr);
 			}
 		}
 		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
 		disallow_signal(SIGHUP);
 
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
+		jffs2_dbg(1, "%s(): pass\n", __func__);
 		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
-			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
+			pr_notice("No space for garbage collection. Aborting GC thread\n");
 			goto die;
 		}
 	}
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 3005ec4..a3750f9 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -307,8 +309,8 @@
 	   trying to GC to make more space. It'll be a fruitless task */
 	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
 
-	dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
-		  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
+	dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
+		    c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
 	dbg_fsbuild("Blocks required to allow deletion:    %d (%d KiB)\n",
 		  c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
 	dbg_fsbuild("Blocks required to allow writes:      %d (%d KiB)\n",
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 96ed3c9..4849a4c 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -12,6 +12,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "compr.h"
 
 static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
@@ -79,7 +81,7 @@
 
 	output_buf = kmalloc(*cdatalen, GFP_KERNEL);
 	if (!output_buf) {
-		printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
+		pr_warn("No memory for compressor allocation. Compression failed.\n");
 		return ret;
 	}
 	orig_slen = *datalen;
@@ -188,7 +190,8 @@
 				tmp_buf = kmalloc(orig_slen, GFP_KERNEL);
 				spin_lock(&jffs2_compressor_list_lock);
 				if (!tmp_buf) {
-					printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n", orig_slen);
+					pr_warn("No memory for compressor allocation. (%d bytes)\n",
+						orig_slen);
 					continue;
 				}
 				else {
@@ -235,7 +238,7 @@
 				cpage_out, datalen, cdatalen);
 		break;
 	default:
-		printk(KERN_ERR "JFFS2: unknown compression mode.\n");
+		pr_err("unknown compression mode\n");
 	}
 
 	if (ret == JFFS2_COMPR_NONE) {
@@ -277,7 +280,8 @@
 				ret = this->decompress(cdata_in, data_out, cdatalen, datalen);
 				spin_lock(&jffs2_compressor_list_lock);
 				if (ret) {
-					printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret);
+					pr_warn("Decompressor \"%s\" returned %d\n",
+						this->name, ret);
 				}
 				else {
 					this->stat_decompr_blocks++;
@@ -287,7 +291,7 @@
 				return ret;
 			}
 		}
-		printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype);
+		pr_warn("compression type 0x%02x not available\n", comprtype);
 		spin_unlock(&jffs2_compressor_list_lock);
 		return -EIO;
 	}
@@ -299,7 +303,7 @@
 	struct jffs2_compressor *this;
 
 	if (!comp->name) {
-		printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n");
+		pr_warn("NULL compressor name at registering JFFS2 compressor. Failed.\n");
 		return -1;
 	}
 	comp->compr_buf_size=0;
@@ -309,7 +313,7 @@
 	comp->stat_compr_new_size=0;
 	comp->stat_compr_blocks=0;
 	comp->stat_decompr_blocks=0;
-	D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name));
+	jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name);
 
 	spin_lock(&jffs2_compressor_list_lock);
 
@@ -332,15 +336,15 @@
 
 int jffs2_unregister_compressor(struct jffs2_compressor *comp)
 {
-	D2(struct jffs2_compressor *this;)
+	D2(struct jffs2_compressor *this);
 
-	D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name));
+	jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name);
 
 	spin_lock(&jffs2_compressor_list_lock);
 
 	if (comp->usecount) {
 		spin_unlock(&jffs2_compressor_list_lock);
-		printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n");
+		pr_warn("Compressor module is in use. Unregister failed.\n");
 		return -1;
 	}
 	list_del(&comp->list);
@@ -377,17 +381,17 @@
 /* Setting default compression mode */
 #ifdef CONFIG_JFFS2_CMODE_NONE
 	jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
-	D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");)
+	jffs2_dbg(1, "default compression mode: none\n");
 #else
 #ifdef CONFIG_JFFS2_CMODE_SIZE
 	jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
-	D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");)
+	jffs2_dbg(1, "default compression mode: size\n");
 #else
 #ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
 	jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO;
-	D1(printk(KERN_INFO "JFFS2: default compression mode: favourlzo\n");)
+	jffs2_dbg(1, "default compression mode: favourlzo\n");
 #else
-	D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");)
+	jffs2_dbg(1, "default compression mode: priority\n");
 #endif
 #endif
 #endif
diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c
index af186ee..c553bd6 100644
--- a/fs/jffs2/compr_lzo.c
+++ b/fs/jffs2/compr_lzo.c
@@ -33,7 +33,6 @@
 	lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
 
 	if (!lzo_mem || !lzo_compress_buf) {
-		printk(KERN_WARNING "Failed to allocate lzo deflate workspace\n");
 		free_workspace();
 		return -ENOMEM;
 	}
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 9e7cec8..92e0644b 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/jffs2.h>
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 5a00102..0b9a1e4 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -14,6 +14,8 @@
 #error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
 #endif
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/zlib.h>
 #include <linux/zutil.h>
@@ -42,18 +44,18 @@
 {
 	def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
 							MAX_MEM_LEVEL));
-	if (!def_strm.workspace) {
-		printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
+	if (!def_strm.workspace)
 		return -ENOMEM;
-	}
-	D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)));
+
+	jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n",
+		  zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
 	inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
 	if (!inf_strm.workspace) {
-		printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
 		vfree(def_strm.workspace);
 		return -ENOMEM;
 	}
-	D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize()));
+	jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n",
+		  zlib_inflate_workspacesize());
 	return 0;
 }
 
@@ -79,7 +81,7 @@
 	mutex_lock(&deflate_mutex);
 
 	if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
-		printk(KERN_WARNING "deflateInit failed\n");
+		pr_warn("deflateInit failed\n");
 		mutex_unlock(&deflate_mutex);
 		return -1;
 	}
@@ -93,13 +95,14 @@
 	while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
 		def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
 		def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
-		D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
-			  def_strm.avail_in, def_strm.avail_out));
+		jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n",
+			  def_strm.avail_in, def_strm.avail_out);
 		ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
-		D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
-			  def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out));
+		jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
+			  def_strm.avail_in, def_strm.avail_out,
+			  def_strm.total_in, def_strm.total_out);
 		if (ret != Z_OK) {
-			D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
+			jffs2_dbg(1, "deflate in loop returned %d\n", ret);
 			zlib_deflateEnd(&def_strm);
 			mutex_unlock(&deflate_mutex);
 			return -1;
@@ -111,20 +114,20 @@
 	zlib_deflateEnd(&def_strm);
 
 	if (ret != Z_STREAM_END) {
-		D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
+		jffs2_dbg(1, "final deflate returned %d\n", ret);
 		ret = -1;
 		goto out;
 	}
 
 	if (def_strm.total_out >= def_strm.total_in) {
-		D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n",
-			  def_strm.total_in, def_strm.total_out));
+		jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n",
+			  def_strm.total_in, def_strm.total_out);
 		ret = -1;
 		goto out;
 	}
 
-	D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n",
-		  def_strm.total_in, def_strm.total_out));
+	jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n",
+		  def_strm.total_in, def_strm.total_out);
 
 	*dstlen = def_strm.total_out;
 	*sourcelen = def_strm.total_in;
@@ -157,18 +160,18 @@
 	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
 	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
 
-		D2(printk(KERN_DEBUG "inflate skipping adler32\n"));
+		jffs2_dbg(2, "inflate skipping adler32\n");
 		wbits = -((data_in[0] >> 4) + 8);
 		inf_strm.next_in += 2;
 		inf_strm.avail_in -= 2;
 	} else {
 		/* Let this remain D1 for now -- it should never happen */
-		D1(printk(KERN_DEBUG "inflate not skipping adler32\n"));
+		jffs2_dbg(1, "inflate not skipping adler32\n");
 	}
 
 
 	if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
-		printk(KERN_WARNING "inflateInit failed\n");
+		pr_warn("inflateInit failed\n");
 		mutex_unlock(&inflate_mutex);
 		return 1;
 	}
@@ -176,7 +179,7 @@
 	while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
 		;
 	if (ret != Z_STREAM_END) {
-		printk(KERN_NOTICE "inflate returned %d\n", ret);
+		pr_notice("inflate returned %d\n", ret);
 	}
 	zlib_inflateEnd(&inf_strm);
 	mutex_unlock(&inflate_mutex);
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index e0b76c8..1090eb6 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/pagemap.h>
@@ -261,12 +263,15 @@
 		bad += c->sector_size;
 	}
 
-#define check(sz) \
-	if (sz != c->sz##_size) {			\
-		printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \
-		       sz, c->sz##_size);		\
-		dump = 1;				\
-	}
+#define check(sz)							\
+do {									\
+	if (sz != c->sz##_size) {					\
+		pr_warn("%s_size mismatch counted 0x%x, c->%s_size 0x%x\n", \
+			#sz, sz, #sz, c->sz##_size);			\
+		dump = 1;						\
+	}								\
+} while (0)
+
 	check(free);
 	check(dirty);
 	check(used);
@@ -274,11 +279,12 @@
 	check(unchecked);
 	check(bad);
 	check(erasing);
+
 #undef check
 
 	if (nr_counted != c->nr_blocks) {
-		printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
-		       __func__, nr_counted, c->nr_blocks);
+		pr_warn("%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
+			__func__, nr_counted, c->nr_blocks);
 		dump = 1;
 	}
 
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index c4f8eef..4fd9be4 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -51,6 +51,7 @@
  * superseded by nicer dbg_xxx() macros...
  */
 #if CONFIG_JFFS2_FS_DEBUG > 0
+#define DEBUG
 #define D1(x) x
 #else
 #define D1(x)
@@ -62,50 +63,33 @@
 #define D2(x)
 #endif
 
+#define jffs2_dbg(level, fmt, ...)		\
+do {						\
+	if (CONFIG_JFFS2_FS_DEBUG >= level)	\
+		pr_debug(fmt, ##__VA_ARGS__);	\
+} while (0)
+
 /* The prefixes of JFFS2 messages */
+#define JFFS2_DBG		KERN_DEBUG
 #define JFFS2_DBG_PREFIX	"[JFFS2 DBG]"
-#define JFFS2_ERR_PREFIX	"JFFS2 error:"
-#define JFFS2_WARN_PREFIX	"JFFS2 warning:"
-#define JFFS2_NOTICE_PREFIX	"JFFS2 notice:"
-
-#define JFFS2_ERR	KERN_ERR
-#define JFFS2_WARN	KERN_WARNING
-#define JFFS2_NOT	KERN_NOTICE
-#define JFFS2_DBG	KERN_DEBUG
-
 #define JFFS2_DBG_MSG_PREFIX	JFFS2_DBG JFFS2_DBG_PREFIX
-#define JFFS2_ERR_MSG_PREFIX	JFFS2_ERR JFFS2_ERR_PREFIX
-#define JFFS2_WARN_MSG_PREFIX	JFFS2_WARN JFFS2_WARN_PREFIX
-#define JFFS2_NOTICE_MSG_PREFIX	JFFS2_NOT JFFS2_NOTICE_PREFIX
 
 /* JFFS2 message macros */
-#define JFFS2_ERROR(fmt, ...)						\
-	do {								\
-		printk(JFFS2_ERR_MSG_PREFIX				\
-			" (%d) %s: " fmt, task_pid_nr(current),		\
-			__func__ , ##__VA_ARGS__);			\
-	} while(0)
+#define JFFS2_ERROR(fmt, ...)					\
+	pr_err("error: (%d) %s: " fmt,				\
+	       task_pid_nr(current), __func__, ##__VA_ARGS__)
 
 #define JFFS2_WARNING(fmt, ...)						\
-	do {								\
-		printk(JFFS2_WARN_MSG_PREFIX				\
-			" (%d) %s: " fmt, task_pid_nr(current),		\
-			__func__ , ##__VA_ARGS__);			\
-	} while(0)
+	pr_warn("warning: (%d) %s: " fmt,				\
+		task_pid_nr(current), __func__, ##__VA_ARGS__)
 
 #define JFFS2_NOTICE(fmt, ...)						\
-	do {								\
-		printk(JFFS2_NOTICE_MSG_PREFIX				\
-			" (%d) %s: " fmt, task_pid_nr(current),		\
-			__func__ , ##__VA_ARGS__);			\
-	} while(0)
+	pr_notice("notice: (%d) %s: " fmt,				\
+		  task_pid_nr(current), __func__, ##__VA_ARGS__)
 
 #define JFFS2_DEBUG(fmt, ...)						\
-	do {								\
-		printk(JFFS2_DBG_MSG_PREFIX				\
-			" (%d) %s: " fmt, task_pid_nr(current),		\
-			__func__ , ##__VA_ARGS__);			\
-	} while(0)
+	printk(KERN_DEBUG "[JFFS2 DBG] (%d) %s: " fmt,			\
+	       task_pid_nr(current), __func__, ##__VA_ARGS__)
 
 /*
  * We split our debugging messages on several parts, depending on the JFFS2
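The debug.h rewrite above replaces the old D1(printk(KERN_DEBUG ...)) pattern with a jffs2_dbg(level, ...) helper that only calls pr_debug() when CONFIG_JFFS2_FS_DEBUG is at least the requested level, and it defines DEBUG in that configuration so pr_debug() actually emits; the per-file hunks below are mechanical conversions to this helper. A minimal usage sketch with illustrative values:

/* Old style, compiled out entirely unless CONFIG_JFFS2_FS_DEBUG > 0:
 *	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", offset));
 * New style, with the same condition expressed through the level argument: */
static void example_debug(uint32_t offset)
{
	jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", offset);
	jffs2_dbg(2, "extra detail only when CONFIG_JFFS2_FS_DEBUG >= 2\n");
}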
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 973ac58..b560188 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -79,7 +81,7 @@
 	uint32_t ino = 0;
 	struct inode *inode = NULL;
 
-	D1(printk(KERN_DEBUG "jffs2_lookup()\n"));
+	jffs2_dbg(1, "jffs2_lookup()\n");
 
 	if (target->d_name.len > JFFS2_MAX_NAME_LEN)
 		return ERR_PTR(-ENAMETOOLONG);
@@ -103,7 +105,7 @@
 	if (ino) {
 		inode = jffs2_iget(dir_i->i_sb, ino);
 		if (IS_ERR(inode))
-			printk(KERN_WARNING "iget() failed for ino #%u\n", ino);
+			pr_warn("iget() failed for ino #%u\n", ino);
 	}
 
 	return d_splice_alias(inode, target);
@@ -119,21 +121,22 @@
 	struct jffs2_full_dirent *fd;
 	unsigned long offset, curofs;
 
-	D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino));
+	jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n",
+		  filp->f_path.dentry->d_inode->i_ino);
 
 	f = JFFS2_INODE_INFO(inode);
 
 	offset = filp->f_pos;
 
 	if (offset == 0) {
-		D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino));
+		jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino);
 		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
 			goto out;
 		offset++;
 	}
 	if (offset == 1) {
 		unsigned long pino = parent_ino(filp->f_path.dentry);
-		D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino));
+		jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino);
 		if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
 			goto out;
 		offset++;
@@ -146,16 +149,18 @@
 		curofs++;
 		/* First loop: curofs = 2; offset = 2 */
 		if (curofs < offset) {
-			D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
-				  fd->name, fd->ino, fd->type, curofs, offset));
+			jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
+				  fd->name, fd->ino, fd->type, curofs, offset);
 			continue;
 		}
 		if (!fd->ino) {
-			D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name));
+			jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n",
+				  fd->name);
 			offset++;
 			continue;
 		}
-		D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type));
+		jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n",
+			  offset, fd->name, fd->ino, fd->type);
 		if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
 			break;
 		offset++;
@@ -184,12 +189,12 @@
 
 	c = JFFS2_SB_INFO(dir_i->i_sb);
 
-	D1(printk(KERN_DEBUG "jffs2_create()\n"));
+	jffs2_dbg(1, "%s()\n", __func__);
 
 	inode = jffs2_new_inode(dir_i, mode, ri);
 
 	if (IS_ERR(inode)) {
-		D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
+		jffs2_dbg(1, "jffs2_new_inode() failed\n");
 		jffs2_free_raw_inode(ri);
 		return PTR_ERR(inode);
 	}
@@ -217,9 +222,9 @@
 
 	jffs2_free_raw_inode(ri);
 
-	D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
-		  inode->i_ino, inode->i_mode, inode->i_nlink,
-		  f->inocache->pino_nlink, inode->i_mapping->nrpages));
+	jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
+		  __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
+		  f->inocache->pino_nlink, inode->i_mapping->nrpages);
 
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
@@ -362,14 +367,15 @@
 	/* We use f->target field to store the target path. */
 	f->target = kmemdup(target, targetlen + 1, GFP_KERNEL);
 	if (!f->target) {
-		printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
+		pr_warn("Can't allocate %d bytes of memory\n", targetlen + 1);
 		mutex_unlock(&f->sem);
 		jffs2_complete_reservation(c);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target));
+	jffs2_dbg(1, "%s(): symlink's target '%s' cached\n",
+		  __func__, (char *)f->target);
 
 	/* No data here. Only a metadata node, which will be
 	   obsoleted by the first data write
@@ -856,7 +862,8 @@
 			f->inocache->pino_nlink++;
 		mutex_unlock(&f->sem);
 
-		printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
+		pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
+			  __func__, ret);
 		/* Might as well let the VFS know */
 		d_instantiate(new_dentry, old_dentry->d_inode);
 		ihold(old_dentry->d_inode);
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index eafb8d3..4a6cf28 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
@@ -46,11 +48,12 @@
 #else /* Linux */
 	struct erase_info *instr;
 
-	D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
-				jeb->offset, jeb->offset, jeb->offset + c->sector_size));
+	jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
+		  __func__,
+		  jeb->offset, jeb->offset, jeb->offset + c->sector_size);
 	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
 	if (!instr) {
-		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+		pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
 		mutex_lock(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 		list_move(&jeb->list, &c->erase_pending_list);
@@ -69,7 +72,6 @@
 	instr->len = c->sector_size;
 	instr->callback = jffs2_erase_callback;
 	instr->priv = (unsigned long)(&instr[1]);
-	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
 	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
 	((struct erase_priv_struct *)instr->priv)->c = c;
@@ -84,7 +86,8 @@
 
 	if (ret == -ENOMEM || ret == -EAGAIN) {
 		/* Erase failed immediately. Refile it on the list */
-		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
+		jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
+			  jeb->offset, ret);
 		mutex_lock(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 		list_move(&jeb->list, &c->erase_pending_list);
@@ -97,9 +100,11 @@
 	}
 
 	if (ret == -EROFS)
-		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
+		pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
+			jeb->offset);
 	else
-		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
+		pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
+			jeb->offset, ret);
 
 	jffs2_erase_failed(c, jeb, bad_offset);
 }
@@ -125,13 +130,14 @@
 
 			work_done++;
 			if (!--count) {
-				D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
+				jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
 				goto done;
 			}
 
 		} else if (!list_empty(&c->erase_pending_list)) {
 			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
-			D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
+			jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
+				  jeb->offset);
 			list_del(&jeb->list);
 			c->erasing_size += c->sector_size;
 			c->wasted_size -= jeb->wasted_size;
@@ -159,13 +165,13 @@
 	spin_unlock(&c->erase_completion_lock);
 	mutex_unlock(&c->erase_free_sem);
  done:
-	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
+	jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
 	return work_done;
 }
 
 static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
-	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
+	jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
 	mutex_lock(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	list_move_tail(&jeb->list, &c->erase_complete_list);
@@ -214,7 +220,7 @@
 	struct erase_priv_struct *priv = (void *)instr->priv;
 
 	if(instr->state != MTD_ERASE_DONE) {
-		printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
+		pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
 			(unsigned long long)instr->addr, instr->state);
 		jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
 	} else {
@@ -269,8 +275,8 @@
 		return;
 	}
 
-	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
-		  jeb->offset, jeb->offset + c->sector_size, ic->ino));
+	jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
+		  jeb->offset, jeb->offset + c->sector_size, ic->ino);
 
 	D2({
 		int i=0;
@@ -281,7 +287,7 @@
 
 		printk(KERN_DEBUG);
 		while(this) {
-			printk(KERN_CONT "0x%08x(%d)->",
+			pr_cont("0x%08x(%d)->",
 			       ref_offset(this), ref_flags(this));
 			if (++i == 5) {
 				printk(KERN_DEBUG);
@@ -289,7 +295,7 @@
 			}
 			this = this->next_in_ino;
 		}
-		printk(KERN_CONT "\n");
+		pr_cont("\n");
 	});
 
 	switch (ic->class) {
@@ -310,7 +316,8 @@
 void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
 	struct jffs2_raw_node_ref *block, *ref;
-	D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
+	jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
+		  jeb->offset);
 
 	block = ref = jeb->first_node;
 
@@ -342,12 +349,13 @@
 			&ebuf, NULL);
 	if (ret != -EOPNOTSUPP) {
 		if (ret) {
-			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
+			jffs2_dbg(1, "MTD point failed %d\n", ret);
 			goto do_flash_read;
 		}
 		if (retlen < c->sector_size) {
 			/* Don't muck about if it won't let us point to the whole erase sector */
-			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
+			jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
+				  retlen);
 			mtd_unpoint(c->mtd, jeb->offset, retlen);
 			goto do_flash_read;
 		}
@@ -359,8 +367,10 @@
 		} while(--retlen);
 		mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
 		if (retlen) {
-			printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
-			       *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf));
+			pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
+				*wordebuf,
+				jeb->offset +
+				c->sector_size-retlen * sizeof(*wordebuf));
 			return -EIO;
 		}
 		return 0;
@@ -368,11 +378,12 @@
  do_flash_read:
 	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!ebuf) {
-		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
+		pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
+			jeb->offset);
 		return -EAGAIN;
 	}
 
-	D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
+	jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);
 
 	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
 		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
@@ -382,12 +393,14 @@
 
 		ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
 		if (ret) {
-			printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+			pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
+				ofs, ret);
 			ret = -EIO;
 			goto fail;
 		}
 		if (retlen != readlen) {
-			printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+			pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
+				ofs, readlen, retlen);
 			ret = -EIO;
 			goto fail;
 		}
@@ -396,7 +409,8 @@
 			unsigned long *datum = ebuf + i;
 			if (*datum + 1) {
 				*bad_offset += i;
-				printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+				pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
+					*datum, *bad_offset);
 				ret = -EIO;
 				goto fail;
 			}
@@ -422,7 +436,7 @@
 	}
 
 	/* Write the erase complete marker */
-	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
+	jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
 	bad_offset = jeb->offset;
 
 	/* Cleanmarker in oob area or no cleanmarker at all ? */
@@ -451,10 +465,10 @@
 
 		if (ret || retlen != sizeof(marker)) {
 			if (ret)
-				printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
+				pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
 				       jeb->offset, ret);
 			else
-				printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+				pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
 				       jeb->offset, sizeof(marker), retlen);
 
 			goto filebad;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 61e6723..db3889b 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/time.h>
@@ -85,7 +87,8 @@
 	unsigned char *pg_buf;
 	int ret;
 
-	D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));
+	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
+		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
 
 	BUG_ON(!PageLocked(pg));
 
@@ -105,7 +108,7 @@
 	flush_dcache_page(pg);
 	kunmap(pg);
 
-	D2(printk(KERN_DEBUG "readpage finished\n"));
+	jffs2_dbg(2, "readpage finished\n");
 	return ret;
 }
 
@@ -144,7 +147,7 @@
 		return -ENOMEM;
 	*pagep = pg;
 
-	D1(printk(KERN_DEBUG "jffs2_write_begin()\n"));
+	jffs2_dbg(1, "%s()\n", __func__);
 
 	if (pageofs > inode->i_size) {
 		/* Make new hole frag from old EOF to new page */
@@ -153,8 +156,8 @@
 		struct jffs2_full_dnode *fn;
 		uint32_t alloc_len;
 
-		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
-			  (unsigned int)inode->i_size, pageofs));
+		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+			  (unsigned int)inode->i_size, pageofs);
 
 		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
 					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
@@ -198,7 +201,8 @@
 			f->metadata = NULL;
 		}
 		if (ret) {
-			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret));
+			jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n",
+				  ret);
 			jffs2_mark_node_obsolete(c, fn->raw);
 			jffs2_free_full_dnode(fn);
 			jffs2_complete_reservation(c);
@@ -222,7 +226,7 @@
 		if (ret)
 			goto out_page;
 	}
-	D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags));
+	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
 	return ret;
 
 out_page:
@@ -248,8 +252,9 @@
 	int ret = 0;
 	uint32_t writtenlen = 0;
 
-	D1(printk(KERN_DEBUG "jffs2_write_end(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
-		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
+	jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
+		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
+		  start, end, pg->flags);
 
 	/* We need to avoid deadlock with page_cache_read() in
 	   jffs2_garbage_collect_pass(). So the page must be
@@ -268,7 +273,8 @@
 	ri = jffs2_alloc_raw_inode();
 
 	if (!ri) {
-		D1(printk(KERN_DEBUG "jffs2_write_end(): Allocation of raw inode failed\n"));
+		jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
+			  __func__);
 		unlock_page(pg);
 		page_cache_release(pg);
 		return -ENOMEM;
@@ -315,13 +321,14 @@
 		/* generic_file_write has written more to the page cache than we've
 		   actually written to the medium. Mark the page !Uptodate so that
 		   it gets reread */
-		D1(printk(KERN_DEBUG "jffs2_write_end(): Not all bytes written. Marking page !uptodate\n"));
+		jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
+			__func__);
 		SetPageError(pg);
 		ClearPageUptodate(pg);
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_write_end() returning %d\n",
-					writtenlen > 0 ? writtenlen : ret));
+	jffs2_dbg(1, "%s() returning %d\n",
+		  __func__, writtenlen > 0 ? writtenlen : ret);
 	unlock_page(pg);
 	page_cache_release(pg);
 	return writtenlen > 0 ? writtenlen : ret;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index c0d5c9d..bb6f993 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -39,7 +41,7 @@
 	int ret;
 	int alloc_type = ALLOC_NORMAL;
 
-	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
+	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);
 
 	/* Special cases - we don't want more than one data node
 	   for these types on the medium at any time. So setattr
@@ -50,7 +52,8 @@
 		/* For these, we don't actually need to read the old node */
 		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
 		mdata = (char *)&dev;
-		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
+		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
+			  __func__, mdatalen);
 	} else if (S_ISLNK(inode->i_mode)) {
 		mutex_lock(&f->sem);
 		mdatalen = f->metadata->size;
@@ -66,7 +69,8 @@
 			return ret;
 		}
 		mutex_unlock(&f->sem);
-		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
+		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
+			  __func__, mdatalen);
 	}
 
 	ri = jffs2_alloc_raw_inode();
@@ -233,7 +237,8 @@
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
 
-	D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
+		  __func__, inode->i_ino, inode->i_mode);
 	truncate_inode_pages(&inode->i_data, 0);
 	end_writeback(inode);
 	jffs2_do_clear_inode(c, f);
@@ -249,7 +254,7 @@
 	dev_t rdev = 0;
 	int ret;
 
-	D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino));
+	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);
 
 	inode = iget_locked(sb, ino);
 	if (!inode)
@@ -317,14 +322,16 @@
 		/* Read the device numbers from the media */
 		if (f->metadata->size != sizeof(jdev.old_id) &&
 		    f->metadata->size != sizeof(jdev.new_id)) {
-			printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
+			pr_notice("Device node has strange size %d\n",
+				  f->metadata->size);
 			goto error_io;
 		}
-		D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
+		jffs2_dbg(1, "Reading device numbers from flash\n");
 		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
 		if (ret < 0) {
 			/* Eep */
-			printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
+			pr_notice("Read device numbers for inode %lu failed\n",
+				  (unsigned long)inode->i_ino);
 			goto error;
 		}
 		if (f->metadata->size == sizeof(jdev.old_id))
@@ -339,12 +346,13 @@
 		break;
 
 	default:
-		printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
+		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
+			__func__, inode->i_mode, (unsigned long)inode->i_ino);
 	}
 
 	mutex_unlock(&f->sem);
 
-	D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
+	jffs2_dbg(1, "jffs2_read_inode() returning\n");
 	unlock_new_inode(inode);
 	return inode;
 
@@ -362,11 +370,13 @@
 	struct iattr iattr;
 
 	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
-		D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
+		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
+			  __func__, inode->i_ino);
 		return;
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));
+	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
+		  __func__, inode->i_ino);
 
 	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
 	iattr.ia_mode = inode->i_mode;
@@ -414,7 +424,8 @@
 	struct jffs2_inode_info *f;
 	int ret;
 
-	D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
+	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
+		  __func__, dir_i->i_ino, mode);
 
 	c = JFFS2_SB_INFO(sb);
 
@@ -504,11 +515,11 @@
 
 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
 	if (c->mtd->type == MTD_NANDFLASH) {
-		printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
+		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
 		return -EINVAL;
 	}
 	if (c->mtd->type == MTD_DATAFLASH) {
-		printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n");
+		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
 		return -EINVAL;
 	}
 #endif
@@ -522,12 +533,13 @@
 	 */
 	if ((c->sector_size * blocks) != c->flash_size) {
 		c->flash_size = c->sector_size * blocks;
-		printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
+		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
 			c->flash_size / 1024);
 	}
 
 	if (c->flash_size < 5*c->sector_size) {
-		printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
+		pr_err("Too few erase blocks (%d)\n",
+		       c->flash_size / c->sector_size);
 		return -EINVAL;
 	}
 
@@ -550,17 +562,17 @@
 	if ((ret = jffs2_do_mount_fs(c)))
 		goto out_inohash;
 
-	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
+	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
 	root_i = jffs2_iget(sb, 1);
 	if (IS_ERR(root_i)) {
-		D1(printk(KERN_WARNING "get root inode failed\n"));
+		jffs2_dbg(1, "get root inode failed\n");
 		ret = PTR_ERR(root_i);
 		goto out_root;
 	}
 
 	ret = -ENOMEM;
 
-	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
+	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
 	sb->s_root = d_make_root(root_i);
 	if (!sb->s_root)
 		goto out_root;
@@ -618,20 +630,21 @@
 		*/
 		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
 		if (!inode) {
-			D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
-				  inum));
+			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
+				  inum);
 
 			spin_lock(&c->inocache_lock);
 			ic = jffs2_get_ino_cache(c, inum);
 			if (!ic) {
-				D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
+				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
+					  inum);
 				spin_unlock(&c->inocache_lock);
 				return NULL;
 			}
 			if (ic->state != INO_STATE_CHECKEDABSENT) {
 				/* Wait for progress. Don't just loop */
-				D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
-					  ic->ino, ic->state));
+				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
+					  ic->ino, ic->state);
 				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
 			} else {
 				spin_unlock(&c->inocache_lock);
@@ -649,8 +662,8 @@
 			return ERR_CAST(inode);
 	}
 	if (is_bad_inode(inode)) {
-		printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. unlinked %d\n",
-		       inum, unlinked);
+		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
+			  inum, unlinked);
 		/* NB. This will happen again. We need to do something appropriate here. */
 		iput(inode);
 		return ERR_PTR(-EIO);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 31dce61133..ad271c7 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
@@ -51,44 +53,44 @@
 	   number of free blocks is low. */
 again:
 	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
-		D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");
 		nextlist = &c->bad_used_list;
 	} else if (n < 50 && !list_empty(&c->erasable_list)) {
 		/* Note that most of them will have gone directly to be erased.
 		   So don't favour the erasable_list _too_ much. */
-		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from erasable_list to GC next\n");
 		nextlist = &c->erasable_list;
 	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
 		/* Most of the time, pick one off the very_dirty list */
-		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n");
 		nextlist = &c->very_dirty_list;
 	} else if (n < 126 && !list_empty(&c->dirty_list)) {
-		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from dirty_list to GC next\n");
 		nextlist = &c->dirty_list;
 	} else if (!list_empty(&c->clean_list)) {
-		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from clean_list to GC next\n");
 		nextlist = &c->clean_list;
 	} else if (!list_empty(&c->dirty_list)) {
-		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));
+		jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n");
 
 		nextlist = &c->dirty_list;
 	} else if (!list_empty(&c->very_dirty_list)) {
-		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
+		jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n");
 		nextlist = &c->very_dirty_list;
 	} else if (!list_empty(&c->erasable_list)) {
-		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));
+		jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n");
 
 		nextlist = &c->erasable_list;
 	} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
 		/* There are blocks waiting for the wbuf sync */
-		D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"));
+		jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
 		spin_unlock(&c->erase_completion_lock);
 		jffs2_flush_wbuf_pad(c);
 		spin_lock(&c->erase_completion_lock);
 		goto again;
 	} else {
 		/* Eep. All were empty */
-		D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
+		jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
 		return NULL;
 	}
 
@@ -97,13 +99,15 @@
 	c->gcblock = ret;
 	ret->gc_node = ret->first_node;
 	if (!ret->gc_node) {
-		printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
+		pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n",
+			ret->offset);
 		BUG();
 	}
 
 	/* Have we accidentally picked a clean block with wasted space ? */
 	if (ret->wasted_size) {
-		D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+		jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n",
+			  ret->wasted_size);
 		ret->dirty_size += ret->wasted_size;
 		c->wasted_size -= ret->wasted_size;
 		c->dirty_size += ret->wasted_size;
@@ -140,8 +144,8 @@
 
 		/* checked_ino is protected by the alloc_sem */
 		if (c->checked_ino > c->highest_ino && xattr) {
-			printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
-			       c->unchecked_size);
+			pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
+				c->unchecked_size);
 			jffs2_dbg_dump_block_lists_nolock(c);
 			spin_unlock(&c->erase_completion_lock);
 			mutex_unlock(&c->alloc_sem);
@@ -163,8 +167,8 @@
 		}
 
 		if (!ic->pino_nlink) {
-			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink/pino zero\n",
-				  ic->ino));
+			jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
+				  ic->ino);
 			spin_unlock(&c->inocache_lock);
 			jffs2_xattr_delete_inode(c, ic);
 			continue;
@@ -172,13 +176,15 @@
 		switch(ic->state) {
 		case INO_STATE_CHECKEDABSENT:
 		case INO_STATE_PRESENT:
-			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
+			jffs2_dbg(1, "Skipping ino #%u already checked\n",
+				  ic->ino);
 			spin_unlock(&c->inocache_lock);
 			continue;
 
 		case INO_STATE_GC:
 		case INO_STATE_CHECKING:
-			printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
+			pr_warn("Inode #%u is in state %d during CRC check phase!\n",
+				ic->ino, ic->state);
 			spin_unlock(&c->inocache_lock);
 			BUG();
 
@@ -186,7 +192,8 @@
 			/* We need to wait for it to finish, lest we move on
 			   and trigger the BUG() above while we haven't yet
 			   finished checking all its nodes */
-			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
+			jffs2_dbg(1, "Waiting for ino #%u to finish reading\n",
+				  ic->ino);
 			/* We need to come back again for the _same_ inode. We've
 			 made no progress in this case, but that should be OK */
 			c->checked_ino--;
@@ -204,11 +211,13 @@
 		ic->state = INO_STATE_CHECKING;
 		spin_unlock(&c->inocache_lock);
 
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));
+		jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n",
+			  __func__, ic->ino);
 
 		ret = jffs2_do_crccheck_inode(c, ic);
 		if (ret)
-			printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
+			pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n",
+				ic->ino);
 
 		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
 		mutex_unlock(&c->alloc_sem);
@@ -220,11 +229,11 @@
 	    !list_empty(&c->erase_pending_list)) {
 		spin_unlock(&c->erase_completion_lock);
 		mutex_unlock(&c->alloc_sem);
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n"));
+		jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__);
 		if (jffs2_erase_pending_blocks(c, 1))
 			return 0;
 
-		D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
+		jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
 		spin_lock(&c->erase_completion_lock);
 		mutex_lock(&c->alloc_sem);
 	}
@@ -242,13 +251,14 @@
 			mutex_unlock(&c->alloc_sem);
 			return -EAGAIN;
 		}
-		D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+		jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n");
 		spin_unlock(&c->erase_completion_lock);
 		mutex_unlock(&c->alloc_sem);
 		return -EIO;
 	}
 
-	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
+	jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n",
+		  jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size);
 	D1(if (c->nextblock)
 	   printk(KERN_DEBUG "Nextblock at  %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
 
@@ -261,12 +271,14 @@
 	gcblock_dirty = jeb->dirty_size;
 
 	while(ref_obsolete(raw)) {
-		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
+		jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
+			  ref_offset(raw));
 		raw = ref_next(raw);
 		if (unlikely(!raw)) {
-			printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
-			printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
-			       jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
+			pr_warn("eep. End of raw list while still supposedly nodes to GC\n");
+			pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
+				jeb->offset, jeb->free_size,
+				jeb->dirty_size, jeb->used_size);
 			jeb->gc_node = raw;
 			spin_unlock(&c->erase_completion_lock);
 			mutex_unlock(&c->alloc_sem);
@@ -275,7 +287,8 @@
 	}
 	jeb->gc_node = raw;
 
-	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));
+	jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
+		  ref_offset(raw));
 
 	if (!raw->next_in_ino) {
 		/* Inode-less node. Clean marker, snapshot or something like that */
@@ -316,7 +329,9 @@
 
 	spin_unlock(&c->erase_completion_lock);
 
-	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));
+	jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n",
+		  __func__, jeb->offset, ref_offset(raw), ref_flags(raw),
+		  ic->ino);
 
 	/* Three possibilities:
 	   1. Inode is already in-core. We must iget it and do proper
@@ -336,8 +351,8 @@
 		if (ref_flags(raw) == REF_PRISTINE)
 			ic->state = INO_STATE_GC;
 		else {
-			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
-				  ic->ino));
+			jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
+				  ic->ino);
 		}
 		break;
 
@@ -353,8 +368,8 @@
 		   we're holding the alloc_sem, no other garbage collection
 		   can happen.
 		*/
-		printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
-		       ic->ino, ic->state);
+		pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
+			ic->ino, ic->state);
 		mutex_unlock(&c->alloc_sem);
 		spin_unlock(&c->inocache_lock);
 		BUG();
@@ -367,8 +382,8 @@
 		   drop the alloc_sem before sleeping. */
 
 		mutex_unlock(&c->alloc_sem);
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
-			  ic->ino, ic->state));
+		jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n",
+			  __func__, ic->ino, ic->state);
 		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
 		/* And because we dropped the alloc_sem we must start again from the
 		   beginning. Ponder chance of livelock here -- we're returning success
@@ -433,7 +448,8 @@
  test_gcnode:
 	if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) {
 		/* Eep. This really should never happen. GC is broken */
-		printk(KERN_ERR "Error garbage collecting node at %08x!\n", ref_offset(jeb->gc_node));
+		pr_err("Error garbage collecting node at %08x!\n",
+		       ref_offset(jeb->gc_node));
 		ret = -ENOSPC;
 	}
  release_sem:
@@ -445,7 +461,8 @@
 
  eraseit:
 	if (c->gcblock && !c->gcblock->used_size) {
-		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
+		jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n",
+			  c->gcblock->offset);
 		/* We're GC'ing an empty block? */
 		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
 		c->gcblock = NULL;
@@ -475,12 +492,12 @@
 
 	if (c->gcblock != jeb) {
 		spin_unlock(&c->erase_completion_lock);
-		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
+		jffs2_dbg(1, "GC block is no longer gcblock. Restart\n");
 		goto upnout;
 	}
 	if (ref_obsolete(raw)) {
 		spin_unlock(&c->erase_completion_lock);
-		D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
+		jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n");
 		/* They'll call again */
 		goto upnout;
 	}
@@ -536,10 +553,10 @@
 	} else if (fd) {
 		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
 	} else {
-		printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
-		       ref_offset(raw), f->inocache->ino);
+		pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n",
+			ref_offset(raw), f->inocache->ino);
 		if (ref_obsolete(raw)) {
-			printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
+			pr_warn("But it's obsolete so we don't mind too much\n");
 		} else {
 			jffs2_dbg_dump_node(c, ref_offset(raw));
 			BUG();
@@ -562,7 +579,8 @@
 	uint32_t crc, rawlen;
 	int retried = 0;
 
-	D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
+	jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n",
+		  ref_offset(raw));
 
 	alloclen = rawlen = ref_totlen(c, c->gcblock, raw);
 
@@ -595,8 +613,8 @@
 
 	crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
 	if (je32_to_cpu(node->u.hdr_crc) != crc) {
-		printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-		       ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
+		pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
 		goto bail;
 	}
 
@@ -604,16 +622,18 @@
 	case JFFS2_NODETYPE_INODE:
 		crc = crc32(0, node, sizeof(node->i)-8);
 		if (je32_to_cpu(node->i.node_crc) != crc) {
-			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-			       ref_offset(raw), je32_to_cpu(node->i.node_crc), crc);
+			pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+				ref_offset(raw), je32_to_cpu(node->i.node_crc),
+				crc);
 			goto bail;
 		}
 
 		if (je32_to_cpu(node->i.dsize)) {
 			crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
 			if (je32_to_cpu(node->i.data_crc) != crc) {
-				printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-				       ref_offset(raw), je32_to_cpu(node->i.data_crc), crc);
+				pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+					ref_offset(raw),
+					je32_to_cpu(node->i.data_crc), crc);
 				goto bail;
 			}
 		}
@@ -622,21 +642,24 @@
 	case JFFS2_NODETYPE_DIRENT:
 		crc = crc32(0, node, sizeof(node->d)-8);
 		if (je32_to_cpu(node->d.node_crc) != crc) {
-			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-			       ref_offset(raw), je32_to_cpu(node->d.node_crc), crc);
+			pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+				ref_offset(raw),
+				je32_to_cpu(node->d.node_crc), crc);
 			goto bail;
 		}
 
 		if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
-			printk(KERN_WARNING "Name in dirent node at 0x%08x contains zeroes\n", ref_offset(raw));
+			pr_warn("Name in dirent node at 0x%08x contains zeroes\n",
+				ref_offset(raw));
 			goto bail;
 		}
 
 		if (node->d.nsize) {
 			crc = crc32(0, node->d.name, node->d.nsize);
 			if (je32_to_cpu(node->d.name_crc) != crc) {
-				printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-				       ref_offset(raw), je32_to_cpu(node->d.name_crc), crc);
+				pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+					ref_offset(raw),
+					je32_to_cpu(node->d.name_crc), crc);
 				goto bail;
 			}
 		}
@@ -644,8 +667,8 @@
 	default:
 		/* If it's inode-less, we don't _know_ what it is. Just copy it intact */
 		if (ic) {
-			printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
-			       ref_offset(raw), je16_to_cpu(node->u.nodetype));
+			pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
+				ref_offset(raw), je16_to_cpu(node->u.nodetype));
 			goto bail;
 		}
 	}
@@ -657,12 +680,13 @@
 	ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
 
 	if (ret || (retlen != rawlen)) {
-		printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
-		       rawlen, phys_ofs, ret, retlen);
+		pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
+			  rawlen, phys_ofs, ret, retlen);
 		if (retlen) {
 			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
 		} else {
-			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs);
+			pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
+				  phys_ofs);
 		}
 		if (!retried) {
 			/* Try to reallocate space and retry */
@@ -671,7 +695,7 @@
 
 			retried = 1;
 
-			D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));
+			jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n");
 
 			jffs2_dbg_acct_sanity_check(c,jeb);
 			jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -681,14 +705,16 @@
 							it is only an upper estimation */
 
 			if (!ret) {
-				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
+				jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
+					  phys_ofs);
 
 				jffs2_dbg_acct_sanity_check(c,jeb);
 				jffs2_dbg_acct_paranoia_check(c, jeb);
 
 				goto retry;
 			}
-			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
+				  ret);
 		}
 
 		if (!ret)
@@ -698,7 +724,8 @@
 	jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
 
 	jffs2_mark_node_obsolete(c, raw);
-	D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
+	jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n",
+		  ref_offset(raw));
 
  out_node:
 	kfree(node);
@@ -725,29 +752,32 @@
 		/* For these, we don't actually need to read the old node */
 		mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
 		mdata = (char *)&dev;
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
+		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
+			  __func__, mdatalen);
 	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
 		mdatalen = fn->size;
 		mdata = kmalloc(fn->size, GFP_KERNEL);
 		if (!mdata) {
-			printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
+			pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
 			return -ENOMEM;
 		}
 		ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
 		if (ret) {
-			printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
+			pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n",
+				ret);
 			kfree(mdata);
 			return ret;
 		}
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen));
+		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
+			  __func__, mdatalen);
 
 	}
 
 	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
 				JFFS2_SUMMARY_INODE_SIZE);
 	if (ret) {
-		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
-		       sizeof(ri)+ mdatalen, ret);
+		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
+			sizeof(ri) + mdatalen, ret);
 		goto out;
 	}
 
@@ -784,7 +814,7 @@
 	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
 
 	if (IS_ERR(new_fn)) {
-		printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+		pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn));
 		ret = PTR_ERR(new_fn);
 		goto out;
 	}
@@ -827,14 +857,15 @@
 	ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
 				JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
 	if (ret) {
-		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
-		       sizeof(rd)+rd.nsize, ret);
+		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
+			sizeof(rd)+rd.nsize, ret);
 		return ret;
 	}
 	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);
 
 	if (IS_ERR(new_fd)) {
-		printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
+		pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n",
+			PTR_ERR(new_fd));
 		return PTR_ERR(new_fd);
 	}
 	jffs2_add_fd_to_list(c, new_fd, &f->dents);
@@ -887,19 +918,22 @@
 			if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
 				continue;
 
-			D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));
+			jffs2_dbg(1, "Check potential deletion dirent at %08x\n",
+				  ref_offset(raw));
 
 			/* This is an obsolete node belonging to the same directory, and it's of the right
 			   length. We need to take a closer look...*/
 			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
 			if (ret) {
-				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
+				pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n",
+					__func__, ret, ref_offset(raw));
 				/* If we can't read it, we don't need to continue to obsolete it. Continue */
 				continue;
 			}
 			if (retlen != rawlen) {
-				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
-				       retlen, rawlen, ref_offset(raw));
+				pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
+					__func__, retlen, rawlen,
+					ref_offset(raw));
 				continue;
 			}
 
@@ -923,8 +957,9 @@
 			   a new deletion dirent to replace it */
 			mutex_unlock(&c->erase_free_sem);
 
-			D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
-				  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
+			jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
+				  ref_offset(fd->raw), fd->name,
+				  ref_offset(raw), je32_to_cpu(rd->ino));
 			kfree(rd);
 
 			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
@@ -947,7 +982,8 @@
 		fdp = &(*fdp)->next;
 	}
 	if (!found) {
-		printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
+		pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n",
+			fd->name, f->inocache->ino);
 	}
 	jffs2_mark_node_obsolete(c, fd->raw);
 	jffs2_free_full_dirent(fd);
@@ -964,8 +1000,8 @@
 	uint32_t alloclen, ilen;
 	int ret;
 
-	D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
-		  f->inocache->ino, start, end));
+	jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end);
 
 	memset(&ri, 0, sizeof(ri));
 
@@ -976,35 +1012,37 @@
 		   write it out again with the _same_ version as before */
 		ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
 		if (readlen != sizeof(ri) || ret) {
-			printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
+			pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n",
+				ret, readlen);
 			goto fill;
 		}
 		if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
-			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
-			       ref_offset(fn->raw),
-			       je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
+			pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
+				__func__, ref_offset(fn->raw),
+				je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
 			return -EIO;
 		}
 		if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
-			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
-			       ref_offset(fn->raw),
-			       je32_to_cpu(ri.totlen), sizeof(ri));
+			pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
+				__func__, ref_offset(fn->raw),
+				je32_to_cpu(ri.totlen), sizeof(ri));
 			return -EIO;
 		}
 		crc = crc32(0, &ri, sizeof(ri)-8);
 		if (crc != je32_to_cpu(ri.node_crc)) {
-			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
-			       ref_offset(fn->raw),
-			       je32_to_cpu(ri.node_crc), crc);
+			pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
+				__func__, ref_offset(fn->raw),
+				je32_to_cpu(ri.node_crc), crc);
 			/* FIXME: We could possibly deal with this by writing new holes for each frag */
-			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
-			       start, end, f->inocache->ino);
+			pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+				start, end, f->inocache->ino);
 			goto fill;
 		}
 		if (ri.compr != JFFS2_COMPR_ZERO) {
-			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
-			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
-			       start, end, f->inocache->ino);
+			pr_warn("%s(): Node 0x%08x wasn't a hole node!\n",
+				__func__, ref_offset(fn->raw));
+			pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+				start, end, f->inocache->ino);
 			goto fill;
 		}
 	} else {
@@ -1043,14 +1081,14 @@
 	ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
 				     JFFS2_SUMMARY_INODE_SIZE);
 	if (ret) {
-		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
-		       sizeof(ri), ret);
+		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
+			sizeof(ri), ret);
 		return ret;
 	}
 	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);
 
 	if (IS_ERR(new_fn)) {
-		printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
+		pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn));
 		return PTR_ERR(new_fn);
 	}
 	if (je32_to_cpu(ri.version) == f->highest_version) {
@@ -1070,9 +1108,9 @@
 	 * above.)
 	 */
 	D1(if(unlikely(fn->frags <= 1)) {
-		printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
-		       fn->frags, je32_to_cpu(ri.version), f->highest_version,
-		       je32_to_cpu(ri.ino));
+			pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
+				__func__, fn->frags, je32_to_cpu(ri.version),
+				f->highest_version, je32_to_cpu(ri.ino));
 	});
 
 	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
@@ -1089,11 +1127,11 @@
 		}
 	}
 	if (fn->frags) {
-		printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n");
+		pr_warn("%s(): Old node still has frags!\n", __func__);
 		BUG();
 	}
 	if (!new_fn->frags) {
-		printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
+		pr_warn("%s(): New node has no frags!\n", __func__);
 		BUG();
 	}
 
@@ -1117,8 +1155,8 @@
 
 	memset(&ri, 0, sizeof(ri));
 
-	D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
-		  f->inocache->ino, start, end));
+	jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end);
 
 	orig_end = end;
 	orig_start = start;
@@ -1149,15 +1187,15 @@
 			/* If the previous frag doesn't even reach the beginning, there's
 			   excessive fragmentation. Just merge. */
 			if (frag->ofs > min) {
-				D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
-					  frag->ofs, frag->ofs+frag->size));
+				jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n",
+					  frag->ofs, frag->ofs+frag->size);
 				start = frag->ofs;
 				continue;
 			}
 			/* OK. This frag holds the first byte of the page. */
 			if (!frag->node || !frag->node->raw) {
-				D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
-					  frag->ofs, frag->ofs+frag->size));
+				jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
+					  frag->ofs, frag->ofs+frag->size);
 				break;
 			} else {
 
@@ -1171,19 +1209,25 @@
 				jeb = &c->blocks[raw->flash_offset / c->sector_size];
 
 				if (jeb == c->gcblock) {
-					D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
-						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+					jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+						  frag->ofs,
+						  frag->ofs + frag->size,
+						  ref_offset(raw));
 					start = frag->ofs;
 					break;
 				}
 				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
-					D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
-						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+					jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
+						  frag->ofs,
+						  frag->ofs + frag->size,
+						  jeb->offset);
 					break;
 				}
 
-				D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
-						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
+					  frag->ofs,
+					  frag->ofs + frag->size,
+					  jeb->offset);
 				start = frag->ofs;
 				break;
 			}
@@ -1199,15 +1243,15 @@
 			/* If the previous frag doesn't even reach the beginning, there's lots
 			   of fragmentation. Just merge. */
 			if (frag->ofs+frag->size < max) {
-				D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
-					  frag->ofs, frag->ofs+frag->size));
+				jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n",
+					  frag->ofs, frag->ofs+frag->size);
 				end = frag->ofs + frag->size;
 				continue;
 			}
 
 			if (!frag->node || !frag->node->raw) {
-				D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
-					  frag->ofs, frag->ofs+frag->size));
+				jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
+					  frag->ofs, frag->ofs+frag->size);
 				break;
 			} else {
 
@@ -1221,25 +1265,31 @@
 				jeb = &c->blocks[raw->flash_offset / c->sector_size];
 
 				if (jeb == c->gcblock) {
-					D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
-						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+					jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+						  frag->ofs,
+						  frag->ofs + frag->size,
+						  ref_offset(raw));
 					end = frag->ofs + frag->size;
 					break;
 				}
 				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
-					D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
-						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+					jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
+						  frag->ofs,
+						  frag->ofs + frag->size,
+						  jeb->offset);
 					break;
 				}
 
-				D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
-						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
+					  frag->ofs,
+					  frag->ofs + frag->size,
+					  jeb->offset);
 				end = frag->ofs + frag->size;
 				break;
 			}
 		}
-		D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
-			  orig_start, orig_end, start, end));
+		jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
+			  orig_start, orig_end, start, end);
 
 		D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
 		BUG_ON(end < orig_end);
@@ -1256,7 +1306,8 @@
 	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
 
 	if (IS_ERR(pg_ptr)) {
-		printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
+		pr_warn("read_cache_page() returned error: %ld\n",
+			PTR_ERR(pg_ptr));
 		return PTR_ERR(pg_ptr);
 	}
 
@@ -1270,8 +1321,8 @@
 					&alloclen, JFFS2_SUMMARY_INODE_SIZE);
 
 		if (ret) {
-			printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
-			       sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret);
+			pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
+				sizeof(ri) + JFFS2_MIN_DATA_LEN, ret);
 			break;
 		}
 		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
@@ -1308,7 +1359,8 @@
 		jffs2_free_comprbuf(comprbuf, writebuf);
 
 		if (IS_ERR(new_fn)) {
-			printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+			pr_warn("Error writing new dnode: %ld\n",
+				PTR_ERR(new_fn));
 			ret = PTR_ERR(new_fn);
 			break;
 		}
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index c082868..4f47aa2 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/init.h>
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 5e03233..975a1f5 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -687,8 +689,8 @@
 	if (!size)
 		return 0;
 	if (unlikely(size > jeb->free_size)) {
-		printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
-		       size, jeb->free_size, jeb->wasted_size);
+		pr_crit("Dirty space 0x%x larger than free_size 0x%x (wasted 0x%x)\n",
+			size, jeb->free_size, jeb->wasted_size);
 		BUG();
 	}
 	/* REF_EMPTY_NODE is !obsolete, so that works OK */
@@ -726,8 +728,10 @@
 
 		/* Last node in block. Use free_space */
 		if (unlikely(ref != jeb->last_node)) {
-			printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
-			       ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0);
+			pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
+				ref, ref_offset(ref), jeb->last_node,
+				jeb->last_node ?
+				ref_offset(jeb->last_node) : 0);
 			BUG();
 		}
 		ref_end = jeb->offset + c->sector_size - jeb->free_size;
@@ -747,16 +751,20 @@
 		if (!jeb)
 			jeb = &c->blocks[ref->flash_offset / c->sector_size];
 
-		printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
-		       ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
-		       ret, ref->__totlen);
+		pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
+			ref, ref_offset(ref), ref_offset(ref) + ref->__totlen,
+			ret, ref->__totlen);
 		if (ref_next(ref)) {
-			printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)),
-			       ref_offset(ref_next(ref))+ref->__totlen);
+			pr_crit("next %p (0x%08x-0x%08x)\n",
+				ref_next(ref), ref_offset(ref_next(ref)),
+				ref_offset(ref_next(ref)) + ref->__totlen);
 		} else 
-			printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node);
+			pr_crit("No next ref. jeb->last_node is %p\n",
+				jeb->last_node);
 
-		printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size);
+		pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n",
+			jeb->wasted_size, jeb->dirty_size, jeb->used_size,
+			jeb->free_size);
 
 #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
 		__jffs2_dbg_dump_node_refs_nolock(c, jeb);
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 694aa5b..6784d1e 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/mtd/mtd.h>
 #include <linux/compiler.h>
@@ -46,10 +48,10 @@
 	/* align it */
 	minsize = PAD(minsize);
 
-	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
+	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
 	mutex_lock(&c->alloc_sem);
 
-	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
+	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);
 
 	spin_lock(&c->erase_completion_lock);
 
@@ -73,11 +75,13 @@
 			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
 			if (dirty < c->nospc_dirty_size) {
 				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
-					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
+					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
+						  __func__);
 					break;
 				}
-				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
-					  dirty, c->unchecked_size, c->sector_size));
+				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
+					  dirty, c->unchecked_size,
+					  c->sector_size);
 
 				spin_unlock(&c->erase_completion_lock);
 				mutex_unlock(&c->alloc_sem);
@@ -96,12 +100,13 @@
 			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
 			if ( (avail / c->sector_size) <= blocksneeded) {
 				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
-					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
+					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
+						  __func__);
 					break;
 				}
 
-				D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
-					  avail, blocksneeded * c->sector_size));
+				jffs2_dbg(1, "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
+					  avail, blocksneeded * c->sector_size);
 				spin_unlock(&c->erase_completion_lock);
 				mutex_unlock(&c->alloc_sem);
 				return -ENOSPC;
@@ -109,9 +114,14 @@
 
 			mutex_unlock(&c->alloc_sem);
 
-			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
-				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
-				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
+			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
+				  c->nr_free_blocks, c->nr_erasing_blocks,
+				  c->free_size, c->dirty_size, c->wasted_size,
+				  c->used_size, c->erasing_size, c->bad_size,
+				  c->free_size + c->dirty_size +
+				  c->wasted_size + c->used_size +
+				  c->erasing_size + c->bad_size,
+				  c->flash_size);
 			spin_unlock(&c->erase_completion_lock);
 
 			ret = jffs2_garbage_collect_pass(c);
@@ -124,7 +134,8 @@
 					DECLARE_WAITQUEUE(wait, current);
 					set_current_state(TASK_UNINTERRUPTIBLE);
 					add_wait_queue(&c->erase_wait, &wait);
-					D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__));
+					jffs2_dbg(1, "%s waiting for erase to complete\n",
+						  __func__);
 					spin_unlock(&c->erase_completion_lock);
 
 					schedule();
@@ -144,7 +155,7 @@
 
 		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
 		if (ret) {
-			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
+			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
 		}
 	}
 	spin_unlock(&c->erase_completion_lock);
@@ -161,13 +172,14 @@
 	int ret = -EAGAIN;
 	minsize = PAD(minsize);
 
-	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
+	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
 
 	spin_lock(&c->erase_completion_lock);
 	while(ret == -EAGAIN) {
 		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
 		if (ret) {
-			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
+			jffs2_dbg(1, "%s(): looping, ret is %d\n",
+				  __func__, ret);
 		}
 	}
 	spin_unlock(&c->erase_completion_lock);
@@ -184,8 +196,8 @@
 {
 
 	if (c->nextblock == NULL) {
-		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
-		  jeb->offset));
+		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
+			  __func__, jeb->offset);
 		return;
 	}
 	/* Check if we have a dirty block now, or if it was dirty already */
@@ -195,17 +207,20 @@
 		jeb->dirty_size += jeb->wasted_size;
 		jeb->wasted_size = 0;
 		if (VERYDIRTY(c, jeb->dirty_size)) {
-			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
-			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+				  jeb->offset, jeb->free_size, jeb->dirty_size,
+				  jeb->used_size);
 			list_add_tail(&jeb->list, &c->very_dirty_list);
 		} else {
-			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
-			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+				  jeb->offset, jeb->free_size, jeb->dirty_size,
+				  jeb->used_size);
 			list_add_tail(&jeb->list, &c->dirty_list);
 		}
 	} else {
-		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
-		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size,
+			  jeb->used_size);
 		list_add_tail(&jeb->list, &c->clean_list);
 	}
 	c->nextblock = NULL;
@@ -230,13 +245,14 @@
 			list_move_tail(&ejeb->list, &c->erase_pending_list);
 			c->nr_erasing_blocks++;
 			jffs2_garbage_collect_trigger(c);
-			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
-				  ejeb->offset));
+			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
+				  __func__, ejeb->offset);
 		}
 
 		if (!c->nr_erasing_blocks &&
 			!list_empty(&c->erasable_pending_wbuf_list)) {
-			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
+			jffs2_dbg(1, "%s(): Flushing write buffer\n",
+				  __func__);
 			/* c->nextblock is NULL, no update to c->nextblock allowed */
 			spin_unlock(&c->erase_completion_lock);
 			jffs2_flush_wbuf_pad(c);
@@ -248,9 +264,11 @@
 		if (!c->nr_erasing_blocks) {
 			/* Ouch. We're in GC, or we wouldn't have got here.
 			   And there's no space left. At all. */
-			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
-				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
-				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
+			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
+				c->nr_erasing_blocks, c->nr_free_blocks,
+				list_empty(&c->erasable_list) ? "yes" : "no",
+				list_empty(&c->erasing_list) ? "yes" : "no",
+				list_empty(&c->erase_pending_list) ? "yes" : "no");
 			return -ENOSPC;
 		}
 
@@ -278,7 +296,8 @@
 		c->wbuf_ofs = 0xffffffff;
 #endif
 
-	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
+	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
+		  __func__, c->nextblock->offset);
 
 	return 0;
 }
@@ -345,7 +364,8 @@
 
 			if (jffs2_wbuf_dirty(c)) {
 				spin_unlock(&c->erase_completion_lock);
-				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
+				jffs2_dbg(1, "%s(): Flushing write buffer\n",
+					  __func__);
 				jffs2_flush_wbuf_pad(c);
 				spin_lock(&c->erase_completion_lock);
 				jeb = c->nextblock;
@@ -387,7 +407,8 @@
 		jeb = c->nextblock;
 
 		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
-			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
+			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
+				jeb->offset, jeb->free_size);
 			goto restart;
 		}
 	}
@@ -408,8 +429,9 @@
 		spin_lock(&c->erase_completion_lock);
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
-		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
+	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
+		  __func__,
+		  *len, jeb->offset + (c->sector_size - jeb->free_size));
 	return 0;
 }
 
@@ -434,20 +456,22 @@
 
 	jeb = &c->blocks[ofs / c->sector_size];
 
-	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
-		  ofs & ~3, ofs & 3, len));
+	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
+		  __func__, ofs & ~3, ofs & 3, len);
 #if 1
 	/* Allow non-obsolete nodes only to be added at the end of c->nextblock, 
 	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
 	   even after refiling c->nextblock */
 	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
 	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
-		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
+		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
+			ofs & ~3, ofs & 3);
 		if (c->nextblock)
-			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
+			pr_warn("nextblock 0x%08x", c->nextblock->offset);
 		else
-			printk(KERN_WARNING "No nextblock");
-		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
+			pr_warn("No nextblock");
+		pr_cont(", expected at %08x\n",
+			jeb->offset + (c->sector_size - jeb->free_size));
 		return ERR_PTR(-EINVAL);
 	}
 #endif
@@ -457,8 +481,9 @@
 
 	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
 		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
-		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
-			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size,
+			  jeb->used_size);
 		if (jffs2_wbuf_dirty(c)) {
 			/* Flush the last write in the block if it's outstanding */
 			spin_unlock(&c->erase_completion_lock);
@@ -480,7 +505,7 @@
 
 void jffs2_complete_reservation(struct jffs2_sb_info *c)
 {
-	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
+	jffs2_dbg(1, "jffs2_complete_reservation()\n");
 	spin_lock(&c->erase_completion_lock);
 	jffs2_garbage_collect_trigger(c);
 	spin_unlock(&c->erase_completion_lock);
@@ -493,7 +518,7 @@
 
 	list_for_each(this, head) {
 		if (this == obj) {
-			D1(printk("%p is on list at %p\n", obj, head));
+			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
 			return 1;
 
 		}
@@ -511,16 +536,18 @@
 	uint32_t freed_len;
 
 	if(unlikely(!ref)) {
-		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
+		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
 		return;
 	}
 	if (ref_obsolete(ref)) {
-		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
+		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
+			  __func__, ref_offset(ref));
 		return;
 	}
 	blocknr = ref->flash_offset / c->sector_size;
 	if (blocknr >= c->nr_blocks) {
-		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
+		pr_notice("raw node at 0x%08x is off the end of device!\n",
+			  ref->flash_offset);
 		BUG();
 	}
 	jeb = &c->blocks[blocknr];
@@ -542,27 +569,31 @@
 
 	if (ref_flags(ref) == REF_UNCHECKED) {
 		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
-			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
-			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
+				pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
+					  freed_len, blocknr,
+					  ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
+			jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
+				  ref_offset(ref), freed_len);
 		jeb->unchecked_size -= freed_len;
 		c->unchecked_size -= freed_len;
 	} else {
 		D1(if (unlikely(jeb->used_size < freed_len)) {
-			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
-			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
+				pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
+					  freed_len, blocknr,
+					  ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
+			jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
+				  ref_offset(ref), freed_len);
 		jeb->used_size -= freed_len;
 		c->used_size -= freed_len;
 	}
 
 	// Take care that wasted size is taken into account
 	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
-		D1(printk("Dirtying\n"));
+		jffs2_dbg(1, "Dirtying\n");
 		addedsize = freed_len;
 		jeb->dirty_size += freed_len;
 		c->dirty_size += freed_len;
@@ -570,12 +601,12 @@
 		/* Convert wasted space to dirty, if not a bad block */
 		if (jeb->wasted_size) {
 			if (on_list(&jeb->list, &c->bad_used_list)) {
-				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
-					  jeb->offset));
+				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
+					  jeb->offset);
 				addedsize = 0; /* To fool the refiling code later */
 			} else {
-				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
-					  jeb->wasted_size, jeb->offset));
+				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
+					  jeb->wasted_size, jeb->offset);
 				addedsize += jeb->wasted_size;
 				jeb->dirty_size += jeb->wasted_size;
 				c->dirty_size += jeb->wasted_size;
@@ -584,7 +615,7 @@
 			}
 		}
 	} else {
-		D1(printk("Wasting\n"));
+		jffs2_dbg(1, "Wasting\n");
 		addedsize = 0;
 		jeb->wasted_size += freed_len;
 		c->wasted_size += freed_len;
@@ -606,50 +637,57 @@
 	}
 
 	if (jeb == c->nextblock) {
-		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
+		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
+			  jeb->offset);
 	} else if (!jeb->used_size && !jeb->unchecked_size) {
 		if (jeb == c->gcblock) {
-			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
+			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
+				  jeb->offset);
 			c->gcblock = NULL;
 		} else {
-			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
+			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
+				  jeb->offset);
 			list_del(&jeb->list);
 		}
 		if (jffs2_wbuf_dirty(c)) {
-			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
+			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
 			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
 		} else {
 			if (jiffies & 127) {
 				/* Most of the time, we just erase it immediately. Otherwise we
 				   spend ages scanning it on mount, etc. */
-				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+				jffs2_dbg(1, "...and adding to erase_pending_list\n");
 				list_add_tail(&jeb->list, &c->erase_pending_list);
 				c->nr_erasing_blocks++;
 				jffs2_garbage_collect_trigger(c);
 			} else {
 				/* Sometimes, however, we leave it elsewhere so it doesn't get
 				   immediately reused, and we spread the load a bit. */
-				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+				jffs2_dbg(1, "...and adding to erasable_list\n");
 				list_add_tail(&jeb->list, &c->erasable_list);
 			}
 		}
-		D1(printk(KERN_DEBUG "Done OK\n"));
+		jffs2_dbg(1, "Done OK\n");
 	} else if (jeb == c->gcblock) {
-		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
+		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
+			  jeb->offset);
 	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
-		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
+		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
+			  jeb->offset);
 		list_del(&jeb->list);
-		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
+		jffs2_dbg(1, "...and adding to dirty_list\n");
 		list_add_tail(&jeb->list, &c->dirty_list);
 	} else if (VERYDIRTY(c, jeb->dirty_size) &&
 		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
-		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
+		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
+			  jeb->offset);
 		list_del(&jeb->list);
-		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
+		jffs2_dbg(1, "...and adding to very_dirty_list\n");
 		list_add_tail(&jeb->list, &c->very_dirty_list);
 	} else {
-		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
-			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size,
+			  jeb->used_size);
 	}
 
 	spin_unlock(&c->erase_completion_lock);
@@ -665,33 +703,40 @@
 	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
 	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
 
-	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
+	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
+		  ref_offset(ref));
 	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
 	if (ret) {
-		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
+			ref_offset(ref), ret);
 		goto out_erase_sem;
 	}
 	if (retlen != sizeof(n)) {
-		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
+			ref_offset(ref), retlen);
 		goto out_erase_sem;
 	}
 	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
-		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
+		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
+			je32_to_cpu(n.totlen), freed_len);
 		goto out_erase_sem;
 	}
 	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
-		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
+		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
+			  ref_offset(ref), je16_to_cpu(n.nodetype));
 		goto out_erase_sem;
 	}
 	/* XXX FIXME: This is ugly now */
 	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
 	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
 	if (ret) {
-		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
+			ref_offset(ref), ret);
 		goto out_erase_sem;
 	}
 	if (retlen != sizeof(n)) {
-		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
+			ref_offset(ref), retlen);
 		goto out_erase_sem;
 	}
 
@@ -751,8 +796,8 @@
 		return 1;
 
 	if (c->unchecked_size) {
-		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
-			  c->unchecked_size, c->checked_ino));
+		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
+			  c->unchecked_size, c->checked_ino);
 		return 1;
 	}
 
@@ -780,8 +825,9 @@
 		}
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
-		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));
+	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
+		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
+		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");
 
 	return ret;
 }
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index ab65ee3..1cd3aec 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -76,7 +76,7 @@
 #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO)
 
 #define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf)
-#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf))
 #define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; })
 #define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; })
 #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
@@ -108,8 +108,6 @@
 
 #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
 
-#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
-#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
 #define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
 
 /* wbuf.c */
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index 3f39be1..0b042b1f 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/crc32.h>
@@ -36,24 +38,25 @@
 	ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
 	if (ret) {
 		jffs2_free_raw_inode(ri);
-		printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret);
+		pr_warn("Error reading node from 0x%08x: %d\n",
+			ref_offset(fd->raw), ret);
 		return ret;
 	}
 	if (readlen != sizeof(*ri)) {
 		jffs2_free_raw_inode(ri);
-		printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
-		       ref_offset(fd->raw), sizeof(*ri), readlen);
+		pr_warn("Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
+			ref_offset(fd->raw), sizeof(*ri), readlen);
 		return -EIO;
 	}
 	crc = crc32(0, ri, sizeof(*ri)-8);
 
-	D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
+	jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
 		  ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
 		  crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
-		  je32_to_cpu(ri->offset), buf));
+		  je32_to_cpu(ri->offset), buf);
 	if (crc != je32_to_cpu(ri->node_crc)) {
-		printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n",
-		       je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
+		pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n",
+			je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
 		ret = -EIO;
 		goto out_ri;
 	}
@@ -66,8 +69,8 @@
 	}
 
 	D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
-		printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
-		       len, ofs, je32_to_cpu(ri->dsize));
+			pr_warn("jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
+				len, ofs, je32_to_cpu(ri->dsize));
 		ret = -EINVAL;
 		goto out_ri;
 	});
@@ -107,8 +110,8 @@
 		decomprbuf = readbuf;
 	}
 
-	D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
-		  readbuf));
+	jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
+		  readbuf);
 	ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
 			       je32_to_cpu(ri->csize), &readlen, readbuf);
 
@@ -119,18 +122,19 @@
 
 	crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
 	if (crc != je32_to_cpu(ri->data_crc)) {
-		printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n",
-		       je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
+		pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n",
+			je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
 		ret = -EIO;
 		goto out_decomprbuf;
 	}
-	D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc));
+	jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc);
 	if (ri->compr != JFFS2_COMPR_NONE) {
-		D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
-			  je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf));
+		jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n",
+			  je32_to_cpu(ri->csize), readbuf,
+			  je32_to_cpu(ri->dsize), decomprbuf);
 		ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
 		if (ret) {
-			printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
+			pr_warn("Error: jffs2_decompress returned %d\n", ret);
 			goto out_decomprbuf;
 		}
 	}
@@ -157,8 +161,8 @@
 	struct jffs2_node_frag *frag;
 	int ret;
 
-	D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n",
-		  f->inocache->ino, offset, offset+len));
+	jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n",
+		  __func__, f->inocache->ino, offset, offset + len);
 
 	frag = jffs2_lookup_node_frag(&f->fragtree, offset);
 
@@ -168,22 +172,27 @@
 	 * (or perhaps is before it, if we've been asked to read off the
 	 * end of the file). */
 	while(offset < end) {
-		D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
+		jffs2_dbg(2, "%s(): offset %d, end %d\n",
+			  __func__, offset, end);
 		if (unlikely(!frag || frag->ofs > offset ||
 			     frag->ofs + frag->size <= offset)) {
 			uint32_t holesize = end - offset;
 			if (frag && frag->ofs > offset) {
-				D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
+				jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
+					  f->inocache->ino, frag->ofs, offset);
 				holesize = min(holesize, frag->ofs - offset);
 			}
-			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
+			jffs2_dbg(1, "Filling non-frag hole from %d-%d\n",
+				  offset, offset + holesize);
 			memset(buf, 0, holesize);
 			buf += holesize;
 			offset += holesize;
 			continue;
 		} else if (unlikely(!frag->node)) {
 			uint32_t holeend = min(end, frag->ofs + frag->size);
-			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
+			jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n",
+				  offset, holeend, frag->ofs,
+				  frag->ofs + frag->size);
 			memset(buf, 0, holeend - offset);
 			buf += holeend - offset;
 			offset = holeend;
@@ -195,20 +204,23 @@
 
 			fragofs = offset - frag->ofs;
 			readlen = min(frag->size - fragofs, end - offset);
-			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
-				  frag->ofs+fragofs, frag->ofs+fragofs+readlen,
-				  ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
+			jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n",
+				  frag->ofs+fragofs,
+				  frag->ofs + fragofs+readlen,
+				  ref_offset(frag->node->raw),
+				  ref_flags(frag->node->raw));
 			ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
-			D2(printk(KERN_DEBUG "node read done\n"));
+			jffs2_dbg(2, "node read done\n");
 			if (ret) {
-				D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret));
+				jffs2_dbg(1, "%s(): error %d\n",
+					  __func__, ret);
 				memset(buf, 0, readlen);
 				return ret;
 			}
 			buf += readlen;
 			offset += readlen;
 			frag = frag_next(frag);
-			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
+			jffs2_dbg(2, "node read was OK. Looping\n");
 		}
 	}
 	return 0;
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 3093ac4..dc0437e 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index f994648..7654e87 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -22,15 +24,15 @@
 
 #define DEFAULT_EMPTY_SCAN_SIZE 256
 
-#define noisy_printk(noise, args...) do { \
-	if (*(noise)) { \
-		printk(KERN_NOTICE args); \
-		 (*(noise))--; \
-		 if (!(*(noise))) { \
-			 printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
-		 } \
-	} \
-} while(0)
+#define noisy_printk(noise, fmt, ...)					\
+do {									\
+	if (*(noise)) {							\
+		pr_notice(fmt, ##__VA_ARGS__);				\
+		(*(noise))--;						\
+		if (!(*(noise)))					\
+			pr_notice("Further such events for this erase block will not be printed\n"); \
+	}								\
+} while (0)
 
 static uint32_t pseudo_random;
 
@@ -96,18 +98,17 @@
 #ifndef __ECOS
 	size_t pointlen, try_size;
 
-	if (c->mtd->point) {
-		ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
-				(void **)&flashbuf, NULL);
-		if (!ret && pointlen < c->mtd->size) {
-			/* Don't muck about if it won't let us point to the whole flash */
-			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
-			mtd_unpoint(c->mtd, 0, pointlen);
-			flashbuf = NULL;
-		}
-		if (ret && ret != -EOPNOTSUPP)
-			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
+	ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
+			(void **)&flashbuf, NULL);
+	if (!ret && pointlen < c->mtd->size) {
+		/* Don't muck about if it won't let us point to the whole flash */
+		jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
+			  pointlen);
+		mtd_unpoint(c->mtd, 0, pointlen);
+		flashbuf = NULL;
 	}
+	if (ret && ret != -EOPNOTSUPP)
+		jffs2_dbg(1, "MTD point failed %d\n", ret);
 #endif
 	if (!flashbuf) {
 		/* For NAND it's quicker to read a whole eraseblock at a time,
@@ -117,15 +118,15 @@
 		else
 			try_size = PAGE_SIZE;
 
-		D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu "
-			"bytes\n", try_size));
+		jffs2_dbg(1, "Trying to allocate readbuf of %zu "
+			  "bytes\n", try_size);
 
 		flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
 		if (!flashbuf)
 			return -ENOMEM;
 
-		D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n",
-			try_size));
+		jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
+			  try_size);
 
 		buf_size = (uint32_t)try_size;
 	}
@@ -178,7 +179,8 @@
 				c->nr_free_blocks++;
 			} else {
 				/* Dirt */
-				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
+				jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
+					  jeb->offset);
 				list_add(&jeb->list, &c->erase_pending_list);
 				c->nr_erasing_blocks++;
 			}
@@ -205,7 +207,8 @@
 				}
 				/* update collected summary information for the current nextblock */
 				jffs2_sum_move_collected(c, s);
-				D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
+				jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
+					  __func__, jeb->offset);
 				c->nextblock = jeb;
 			} else {
 				ret = file_dirty(c, jeb);
@@ -217,20 +220,21 @@
 		case BLK_STATE_ALLDIRTY:
 			/* Nothing valid - not even a clean marker. Needs erasing. */
 			/* For now we just put it on the erasing list. We'll start the erases later */
-			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
+			jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
+				  jeb->offset);
 			list_add(&jeb->list, &c->erase_pending_list);
 			c->nr_erasing_blocks++;
 			break;
 
 		case BLK_STATE_BADBLOCK:
-			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
+			jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
 			list_add(&jeb->list, &c->bad_list);
 			c->bad_size += c->sector_size;
 			c->free_size -= c->sector_size;
 			bad_blocks++;
 			break;
 		default:
-			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
+			pr_warn("%s(): unknown block state\n", __func__);
 			BUG();
 		}
 	}
@@ -250,16 +254,17 @@
 
 		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
 
-		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
-			  skip));
+		jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
+			  __func__, skip);
 		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
 		jffs2_scan_dirty_space(c, c->nextblock, skip);
 	}
 #endif
 	if (c->nr_erasing_blocks) {
 		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
-			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
-			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
+			pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
+			pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
+				  empty_blocks, bad_blocks, c->nr_blocks);
 			ret = -EIO;
 			goto out;
 		}
@@ -287,11 +292,13 @@
 
 	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
 	if (ret) {
-		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
+		jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
+			  len, ofs, ret);
 		return ret;
 	}
 	if (retlen < len) {
-		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
+		jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
+			  ofs, retlen);
 		return -EIO;
 	}
 	return 0;
@@ -368,7 +375,7 @@
 
 	if (jffs2_sum_active())
 		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
-	dbg_xattr("scaning xdatum at %#08x (xid=%u, version=%u)\n",
+	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
 		  ofs, xd->xid, xd->version);
 	return 0;
 }
@@ -449,7 +456,7 @@
 	ofs = jeb->offset;
 	prevofs = jeb->offset - 1;
 
-	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));
+	jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);
 
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
 	if (jffs2_cleanmarker_oob(c)) {
@@ -459,7 +466,7 @@
 			return BLK_STATE_BADBLOCK;
 
 		ret = jffs2_check_nand_cleanmarker(c, jeb);
-		D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
+		jffs2_dbg(2, "jffs2_check_nand_cleanmarker returned %d\n", ret);
 
 		/* Even if it's not found, we still scan to see
 		   if the block is empty. We use this information
@@ -561,7 +568,8 @@
 		if (jffs2_cleanmarker_oob(c)) {
 			/* scan oob, take care of cleanmarker */
 			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
-			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
+			jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
+				  ret);
 			switch (ret) {
 			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
 			case 1: 	return BLK_STATE_ALLDIRTY;
@@ -569,15 +577,16 @@
 			}
 		}
 #endif
-		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
+		jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
+			  jeb->offset);
 		if (c->cleanmarker_size == 0)
 			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
 		else
 			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
 	}
 	if (ofs) {
-		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
-			  jeb->offset + ofs));
+		jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
+			  jeb->offset + ofs);
 		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
 			return err;
 		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
@@ -604,12 +613,13 @@
 		cond_resched();
 
 		if (ofs & 3) {
-			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
+			pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
 			ofs = PAD(ofs);
 			continue;
 		}
 		if (ofs == prevofs) {
-			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
+			pr_warn("ofs 0x%08x has already been seen. Skipping\n",
+				ofs);
 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
 				return err;
 			ofs += 4;
@@ -618,8 +628,10 @@
 		prevofs = ofs;
 
 		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
-			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
-				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
+			jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
+				  sizeof(struct jffs2_unknown_node),
+				  jeb->offset, c->sector_size, ofs,
+				  sizeof(*node));
 			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
 				return err;
 			break;
@@ -627,8 +639,9 @@
 
 		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
 			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
-			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
-				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
+			jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
+				  sizeof(struct jffs2_unknown_node),
+				  buf_len, ofs);
 			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
 			if (err)
 				return err;
@@ -645,13 +658,13 @@
 			ofs += 4;
 			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);
 
-			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
+			jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
 		more_empty:
 			inbuf_ofs = ofs - buf_ofs;
 			while (inbuf_ofs < scan_end) {
 				if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
-					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
-					       empty_start, ofs);
+					pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
+						empty_start, ofs);
 					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
 						return err;
 					goto scan_more;
@@ -661,13 +674,15 @@
 				ofs += 4;
 			}
 			/* Ran off end. */
-			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));
+			jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
+				  ofs);
 
 			/* If we're only checking the beginning of a block with a cleanmarker,
 			   bail now */
 			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
 			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
-				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
+				jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
+					  EMPTY_SCAN_SIZE(c->sector_size));
 				return BLK_STATE_CLEANMARKER;
 			}
 			if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
@@ -680,13 +695,14 @@
 			if (!buf_len) {
 				/* No more to read. Break out of main loop without marking
 				   this range of empty space as dirty (because it's not) */
-				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
-					  empty_start));
+				jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
+					  empty_start);
 				break;
 			}
 			/* point never reaches here */
 			scan_end = buf_len;
-			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
+			jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
+				  buf_len, ofs);
 			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
 			if (err)
 				return err;
@@ -695,22 +711,23 @@
 		}
 
 		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
-			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
+			pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
+				ofs);
 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
 				return err;
 			ofs += 4;
 			continue;
 		}
 		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
-			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
+			jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
 				return err;
 			ofs += 4;
 			continue;
 		}
 		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
-			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
-			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
+			pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
+			pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
 				return err;
 			ofs += 4;
@@ -718,7 +735,8 @@
 		}
 		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
 			/* OK. We're out of possibilities. Whinge and move on */
-			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
+			noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
+				     __func__,
 				     JFFS2_MAGIC_BITMASK, ofs,
 				     je16_to_cpu(node->magic));
 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
@@ -733,7 +751,8 @@
 		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
 
 		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
-			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
+			noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x} has invalid CRC 0x%08x (calculated 0x%08x)\n",
+				     __func__,
 				     ofs, je16_to_cpu(node->magic),
 				     je16_to_cpu(node->nodetype),
 				     je32_to_cpu(node->totlen),
@@ -747,9 +766,9 @@
 
 		if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
 			/* Eep. Node goes over the end of the erase block. */
-			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
-			       ofs, je32_to_cpu(node->totlen));
-			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
+			pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
+				ofs, je32_to_cpu(node->totlen));
+			pr_warn("Perhaps the file system was created with the wrong erase size?\n");
 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
 				return err;
 			ofs += 4;
@@ -758,7 +777,8 @@
 
 		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
 			/* Wheee. This is an obsoleted node */
-			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
+			jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
+				  ofs);
 			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
 				return err;
 			ofs += PAD(je32_to_cpu(node->totlen));
@@ -769,8 +789,9 @@
 		case JFFS2_NODETYPE_INODE:
 			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
-				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
-					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
+				jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  sizeof(struct jffs2_raw_inode),
+					  buf_len, ofs);
 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
 				if (err)
 					return err;
@@ -785,8 +806,9 @@
 		case JFFS2_NODETYPE_DIRENT:
 			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
-				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
-					  je32_to_cpu(node->totlen), buf_len, ofs));
+				jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  je32_to_cpu(node->totlen), buf_len,
+					  ofs);
 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
 				if (err)
 					return err;
@@ -802,9 +824,9 @@
 		case JFFS2_NODETYPE_XATTR:
 			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
-				D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
-					  " left to end of buf. Reading 0x%x at 0x%08x\n",
-					  je32_to_cpu(node->totlen), buf_len, ofs));
+				jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  je32_to_cpu(node->totlen), buf_len,
+					  ofs);
 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
 				if (err)
 					return err;
@@ -819,9 +841,9 @@
 		case JFFS2_NODETYPE_XREF:
 			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
-				D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
-					  " left to end of buf. Reading 0x%x at 0x%08x\n",
-					  je32_to_cpu(node->totlen), buf_len, ofs));
+				jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  je32_to_cpu(node->totlen), buf_len,
+					  ofs);
 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
 				if (err)
 					return err;
@@ -836,15 +858,17 @@
 #endif	/* CONFIG_JFFS2_FS_XATTR */
 
 		case JFFS2_NODETYPE_CLEANMARKER:
-			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
+			jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
 			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
-				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
-				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
+				pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
+					  ofs, je32_to_cpu(node->totlen),
+					  c->cleanmarker_size);
 				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
 					return err;
 				ofs += PAD(sizeof(struct jffs2_unknown_node));
 			} else if (jeb->first_node) {
-				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
+				pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
+					  ofs, jeb->offset);
 				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
 					return err;
 				ofs += PAD(sizeof(struct jffs2_unknown_node));
@@ -866,7 +890,8 @@
 		default:
 			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
 			case JFFS2_FEATURE_ROCOMPAT:
-				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+				pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
+					  je16_to_cpu(node->nodetype), ofs);
 				c->flags |= JFFS2_SB_FLAG_RO;
 				if (!(jffs2_is_readonly(c)))
 					return -EROFS;
@@ -876,18 +901,21 @@
 				break;
 
 			case JFFS2_FEATURE_INCOMPAT:
-				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+				pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
+					  je16_to_cpu(node->nodetype), ofs);
 				return -EINVAL;
 
 			case JFFS2_FEATURE_RWCOMPAT_DELETE:
-				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
+					  je16_to_cpu(node->nodetype), ofs);
 				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
 					return err;
 				ofs += PAD(je32_to_cpu(node->totlen));
 				break;
 
 			case JFFS2_FEATURE_RWCOMPAT_COPY: {
-				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
+					  je16_to_cpu(node->nodetype), ofs);
 
 				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
 
@@ -908,8 +936,9 @@
 		}
 	}
 
-	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
-		  jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
+	jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
+		  jeb->offset, jeb->free_size, jeb->dirty_size,
+		  jeb->unchecked_size, jeb->used_size, jeb->wasted_size);
 	
 	/* mark_node_obsolete can add to wasted !! */
 	if (jeb->wasted_size) {
@@ -935,7 +964,7 @@
 
 	ic = jffs2_alloc_inode_cache();
 	if (!ic) {
-		printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n");
+		pr_notice("%s(): allocation of inode cache failed\n", __func__);
 		return NULL;
 	}
 	memset(ic, 0, sizeof(*ic));
@@ -954,7 +983,7 @@
 	struct jffs2_inode_cache *ic;
 	uint32_t crc, ino = je32_to_cpu(ri->ino);
 
-	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
+	jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
 
 	/* We do very little here now. Just check the ino# to which we should attribute
 	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
@@ -968,9 +997,8 @@
 	/* Check the node CRC in any case. */
 	crc = crc32(0, ri, sizeof(*ri)-8);
 	if (crc != je32_to_cpu(ri->node_crc)) {
-		printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on "
-		       "node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-		       ofs, je32_to_cpu(ri->node_crc), crc);
+		pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			  __func__, ofs, je32_to_cpu(ri->node_crc), crc);
 		/*
 		 * We believe totlen because the CRC on the node
 		 * _header_ was OK, just the node itself failed.
@@ -989,10 +1017,10 @@
 	/* Wheee. It worked */
 	jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
 
-	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
+	jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
 		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
 		  je32_to_cpu(ri->offset),
-		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));
+		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));
 
 	pseudo_random += je32_to_cpu(ri->version);
 
@@ -1012,15 +1040,15 @@
 	uint32_t crc;
 	int err;
 
-	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
+	jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
 
 	/* We don't get here unless the node is still valid, so we don't have to
 	   mask in the ACCURATE bit any more. */
 	crc = crc32(0, rd, sizeof(*rd)-8);
 
 	if (crc != je32_to_cpu(rd->node_crc)) {
-		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-		       ofs, je32_to_cpu(rd->node_crc), crc);
+		pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			  __func__, ofs, je32_to_cpu(rd->node_crc), crc);
 		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
 		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
 			return err;
@@ -1032,7 +1060,7 @@
 	/* Should never happen. Did. (OLPC trac #4184)*/
 	checkedlen = strnlen(rd->name, rd->nsize);
 	if (checkedlen < rd->nsize) {
-		printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
+		pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
 		       ofs, checkedlen);
 	}
 	fd = jffs2_alloc_full_dirent(checkedlen+1);
@@ -1044,9 +1072,10 @@
 
 	crc = crc32(0, fd->name, rd->nsize);
 	if (crc != je32_to_cpu(rd->name_crc)) {
-		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
-		       ofs, je32_to_cpu(rd->name_crc), crc);
-		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
+		pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			  __func__, ofs, je32_to_cpu(rd->name_crc), crc);
+		jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n",
+			  fd->name, je32_to_cpu(rd->ino));
 		jffs2_free_full_dirent(fd);
 		/* FIXME: Why do we believe totlen? */
 		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 0f20208..aca97f3 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -23,8 +23,8 @@
 #include "nodelist.h"
 
 /* ---- Initial Security Label(s) Attachment callback --- */
-int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
-		     void *fs_info)
+static int jffs2_initxattrs(struct inode *inode,
+			    const struct xattr *xattr_array, void *fs_info)
 {
 	const struct xattr *xattr;
 	int err = 0;
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index e537fb0..c522d09 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -11,6 +11,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
@@ -442,13 +444,16 @@
 				/* This should never happen, but https://dev.laptop.org/ticket/4184 */
 				checkedlen = strnlen(spd->name, spd->nsize);
 				if (!checkedlen) {
-					printk(KERN_ERR "Dirent at %08x has zero at start of name. Aborting mount.\n",
-					       jeb->offset + je32_to_cpu(spd->offset));
+					pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n",
+					       jeb->offset +
+					       je32_to_cpu(spd->offset));
 					return -EIO;
 				}
 				if (checkedlen < spd->nsize) {
-					printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
-					       jeb->offset + je32_to_cpu(spd->offset), checkedlen);
+					pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
+					       jeb->offset +
+					       je32_to_cpu(spd->offset),
+					       checkedlen);
 				}
 
 
@@ -808,8 +813,7 @@
 
 	sum_ofs = jeb->offset + c->sector_size - jeb->free_size;
 
-	dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
-		    sum_ofs);
+	dbg_summary("writing out data to flash to pos : 0x%08x\n", sum_ofs);
 
 	ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);
 
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index f2d96b5..f9916f3 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -69,7 +71,7 @@
 	sb->s_dirt = 0;
 
 	if (!(sb->s_flags & MS_RDONLY)) {
-		D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
+		jffs2_dbg(1, "%s()\n", __func__);
 		jffs2_flush_wbuf_gc(c, 0);
 	}
 
@@ -214,8 +216,8 @@
 						JFFS2_COMPR_MODE_FORCEZLIB;
 #endif
 			else {
-				printk(KERN_ERR "JFFS2 Error: unknown compressor \"%s\"",
-						name);
+				pr_err("Error: unknown compressor \"%s\"\n",
+				       name);
 				kfree(name);
 				return -EINVAL;
 			}
@@ -223,8 +225,8 @@
 			c->mount_opts.override_compr = true;
 			break;
 		default:
-			printk(KERN_ERR "JFFS2 Error: unrecognized mount option '%s' or missing value\n",
-					p);
+			pr_err("Error: unrecognized mount option '%s' or missing value\n",
+			       p);
 			return -EINVAL;
 		}
 	}
@@ -266,9 +268,9 @@
 	struct jffs2_sb_info *c;
 	int ret;
 
-	D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():"
+	jffs2_dbg(1, "jffs2_get_sb_mtd():"
 		  " New superblock for device %d (\"%s\")\n",
-		  sb->s_mtd->index, sb->s_mtd->name));
+		  sb->s_mtd->index, sb->s_mtd->name);
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -315,7 +317,7 @@
 {
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
-	D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
+	jffs2_dbg(2, "%s()\n", __func__);
 
 	if (sb->s_dirt)
 		jffs2_write_super(sb);
@@ -336,7 +338,7 @@
 	kfree(c->inocache_list);
 	jffs2_clear_xattr_subsystem(c);
 	mtd_sync(c->mtd);
-	D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
+	jffs2_dbg(1, "%s(): returning\n", __func__);
 }
 
 static void jffs2_kill_sb(struct super_block *sb)
@@ -371,7 +373,7 @@
 	BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
 	BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
 
-	printk(KERN_INFO "JFFS2 version 2.2."
+	pr_info("version 2.2."
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
 	       " (NAND)"
 #endif
@@ -386,22 +388,22 @@
 						SLAB_MEM_SPREAD),
 					     jffs2_i_init_once);
 	if (!jffs2_inode_cachep) {
-		printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
+		pr_err("error: Failed to initialise inode cache\n");
 		return -ENOMEM;
 	}
 	ret = jffs2_compressors_init();
 	if (ret) {
-		printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
+		pr_err("error: Failed to initialise compressors\n");
 		goto out;
 	}
 	ret = jffs2_create_slab_caches();
 	if (ret) {
-		printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
+		pr_err("error: Failed to initialise slab caches\n");
 		goto out_compressors;
 	}
 	ret = register_filesystem(&jffs2_fs_type);
 	if (ret) {
-		printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n");
+		pr_err("error: Failed to register filesystem\n");
 		goto out_slab;
 	}
 	return 0;
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index e3035af..6e56333 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/namei.h>
@@ -47,10 +49,11 @@
 	 */
 
 	if (!p) {
-		printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n");
+		pr_err("%s(): can't find symlink target\n", __func__);
 		p = ERR_PTR(-EIO);
 	}
-	D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target));
+	jffs2_dbg(1, "%s(): target path is '%s'\n",
+		  __func__, (char *)f->target);
 
 	nd_set_link(nd, p);
 
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 30e8f47..74d9be1 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -11,6 +11,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
@@ -91,7 +93,7 @@
 
 	new = kmalloc(sizeof(*new), GFP_KERNEL);
 	if (!new) {
-		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
+		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
 		jffs2_clear_wbuf_ino_list(c);
 		c->wbuf_inodes = &inodirty_nomem;
 		return;
@@ -113,19 +115,20 @@
 	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
 		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
 
-		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
+		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
+			  jeb->offset);
 		list_del(this);
 		if ((jiffies + (n++)) & 127) {
 			/* Most of the time, we just erase it immediately. Otherwise we
 			   spend ages scanning it on mount, etc. */
-			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+			jffs2_dbg(1, "...and adding to erase_pending_list\n");
 			list_add_tail(&jeb->list, &c->erase_pending_list);
 			c->nr_erasing_blocks++;
 			jffs2_garbage_collect_trigger(c);
 		} else {
 			/* Sometimes, however, we leave it elsewhere so it doesn't get
 			   immediately reused, and we spread the load a bit. */
-			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+			jffs2_dbg(1, "...and adding to erasable_list\n");
 			list_add_tail(&jeb->list, &c->erasable_list);
 		}
 	}
@@ -136,7 +139,7 @@
 
 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
 {
-	D1(printk("About to refile bad block at %08x\n", jeb->offset));
+	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
 
 	/* File the existing block on the bad_used_list.... */
 	if (c->nextblock == jeb)
@@ -144,12 +147,14 @@
 	else /* Not sure this should ever happen... need more coffee */
 		list_del(&jeb->list);
 	if (jeb->first_node) {
-		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
+		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
+			  jeb->offset);
 		list_add(&jeb->list, &c->bad_used_list);
 	} else {
 		BUG_ON(allow_empty == REFILE_NOTEMPTY);
 		/* It has to have had some nodes or we couldn't be here */
-		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
+		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
+			  jeb->offset);
 		list_add(&jeb->list, &c->erase_pending_list);
 		c->nr_erasing_blocks++;
 		jffs2_garbage_collect_trigger(c);
@@ -230,10 +235,12 @@
 
 	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
 	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
-		printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret);
+		pr_warn("%s(): Read back of page at %08x failed: %d\n",
+			__func__, c->wbuf_ofs, ret);
 		return ret;
 	} else if (retlen != c->wbuf_pagesize) {
-		printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x gave short read: %zd not %d.\n", ofs, retlen, c->wbuf_pagesize);
+		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
+			__func__, ofs, retlen, c->wbuf_pagesize);
 		return -EIO;
 	}
 	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
@@ -246,12 +253,12 @@
 	else
 		eccstr = "OK or unused";
 
-	printk(KERN_WARNING "Write verify error (ECC %s) at %08x. Wrote:\n",
-	       eccstr, c->wbuf_ofs);
+	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
+		eccstr, c->wbuf_ofs);
 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
 		       c->wbuf, c->wbuf_pagesize, 0);
 
-	printk(KERN_WARNING "Read back:\n");
+	pr_warn("Read back:\n");
 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
 		       c->wbuf_verify, c->wbuf_pagesize, 0);
 
@@ -308,7 +315,7 @@
 
 	if (!first_raw) {
 		/* All nodes were obsolete. Nothing to recover. */
-		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
+		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
 		c->wbuf_len = 0;
 		return;
 	}
@@ -331,7 +338,7 @@
 
 		buf = kmalloc(end - start, GFP_KERNEL);
 		if (!buf) {
-			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
+			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
 
 			goto read_failed;
 		}
@@ -346,7 +353,7 @@
 			ret = 0;
 
 		if (ret || retlen != c->wbuf_ofs - start) {
-			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
+			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
 
 			kfree(buf);
 			buf = NULL;
@@ -380,7 +387,7 @@
 	/* ... and get an allocation of space from a shiny new block instead */
 	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
 	if (ret) {
-		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
+		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
 		kfree(buf);
 		return;
 	}
@@ -390,7 +397,7 @@
 
 	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
 	if (ret) {
-		printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
+		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
 		kfree(buf);
 		return;
 	}
@@ -406,13 +413,13 @@
 		unsigned char *rewrite_buf = buf?:c->wbuf;
 		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
 
-		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
-			  towrite, ofs));
+		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
+			  towrite, ofs);
 
 #ifdef BREAKMEHEADER
 		static int breakme;
 		if (breakme++ == 20) {
-			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
+			pr_notice("Faking write error at 0x%08x\n", ofs);
 			breakme = 0;
 			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
 			ret = -EIO;
@@ -423,7 +430,7 @@
 
 		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
 			/* Argh. We tried. Really we did. */
-			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
+			pr_crit("Recovery of wbuf failed due to a second write error\n");
 			kfree(buf);
 
 			if (retlen)
@@ -431,7 +438,7 @@
 
 			return;
 		}
-		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
+		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
 
 		c->wbuf_len = (end - start) - towrite;
 		c->wbuf_ofs = ofs + towrite;
@@ -459,8 +466,8 @@
 		struct jffs2_raw_node_ref **adjust_ref = NULL;
 		struct jffs2_inode_info *f = NULL;
 
-		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
-			  rawlen, ref_offset(raw), ref_flags(raw), ofs));
+		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
+			  rawlen, ref_offset(raw), ref_flags(raw), ofs);
 
 		ic = jffs2_raw_ref_to_ic(raw);
 
@@ -540,7 +547,8 @@
 
 	/* Fix up the original jeb now it's on the bad_list */
 	if (first_raw == jeb->first_node) {
-		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
+		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
+			  jeb->offset);
 		list_move(&jeb->list, &c->erase_pending_list);
 		c->nr_erasing_blocks++;
 		jffs2_garbage_collect_trigger(c);
@@ -554,7 +562,8 @@
 
 	spin_unlock(&c->erase_completion_lock);
 
-	D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
+	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
+		  c->wbuf_ofs, c->wbuf_len);
 
 }
 
@@ -579,7 +588,7 @@
 		return 0;
 
 	if (!mutex_is_locked(&c->alloc_sem)) {
-		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
+		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
 		BUG();
 	}
 
@@ -617,7 +626,7 @@
 #ifdef BREAKME
 	static int breakme;
 	if (breakme++ == 20) {
-		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
+		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
 		breakme = 0;
 		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
 			  brokenbuf);
@@ -629,11 +638,11 @@
 				&retlen, c->wbuf);
 
 	if (ret) {
-		printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
+		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
 		goto wfail;
 	} else if (retlen != c->wbuf_pagesize) {
-		printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
-		       retlen, c->wbuf_pagesize);
+		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
+			retlen, c->wbuf_pagesize);
 		ret = -EIO;
 		goto wfail;
 	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
@@ -647,17 +656,18 @@
 	if (pad) {
 		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
 
-		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
-			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
+		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
+			  (wbuf_jeb == c->nextblock) ? "next" : "",
+			  wbuf_jeb->offset);
 
 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
 		   padded. If there is less free space in the block than that,
 		   something screwed up */
 		if (wbuf_jeb->free_size < waste) {
-			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
-			       c->wbuf_ofs, c->wbuf_len, waste);
-			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
-			       wbuf_jeb->offset, wbuf_jeb->free_size);
+			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
+				c->wbuf_ofs, c->wbuf_len, waste);
+			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
+				wbuf_jeb->offset, wbuf_jeb->free_size);
 			BUG();
 		}
 
@@ -694,14 +704,14 @@
 	uint32_t old_wbuf_len;
 	int ret = 0;
 
-	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
+	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
 
 	if (!c->wbuf)
 		return 0;
 
 	mutex_lock(&c->alloc_sem);
 	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
-		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
+		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
 		mutex_unlock(&c->alloc_sem);
 		return 0;
 	}
@@ -711,7 +721,8 @@
 
 	if (c->unchecked_size) {
 		/* GC won't make any progress for a while */
-		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
+		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
+			  __func__);
 		down_write(&c->wbuf_sem);
 		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
 		/* retry flushing wbuf in case jffs2_wbuf_recover
@@ -724,7 +735,7 @@
 
 		mutex_unlock(&c->alloc_sem);
 
-		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
+		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
 
 		ret = jffs2_garbage_collect_pass(c);
 		if (ret) {
@@ -742,7 +753,7 @@
 		mutex_lock(&c->alloc_sem);
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
+	jffs2_dbg(1, "%s(): ends...\n", __func__);
 
 	mutex_unlock(&c->alloc_sem);
 	return ret;
@@ -811,9 +822,8 @@
 	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
 		/* It's a write to a new block */
 		if (c->wbuf_len) {
-			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
-				  "causes flush of wbuf at 0x%08x\n",
-				  (unsigned long)to, c->wbuf_ofs));
+			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
+				  __func__, (unsigned long)to, c->wbuf_ofs);
 			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
 			if (ret)
 				goto outerr;
@@ -825,11 +835,11 @@
 
 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
 		/* We're not writing immediately after the writebuffer. Bad. */
-		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
-		       "to %08lx\n", (unsigned long)to);
+		pr_crit("%s(): Non-contiguous write to %08lx\n",
+			__func__, (unsigned long)to);
 		if (c->wbuf_len)
-			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
-			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
+			pr_crit("wbuf was previously %08x-%08x\n",
+				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
 		BUG();
 	}
 
@@ -957,8 +967,8 @@
 
 	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
 		if (ret == -EBADMSG)
-			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
-			       " returned ECC error\n", len, ofs);
+			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
+				len, ofs);
 		/*
 		 * We have the raw data without ECC correction in the buffer,
 		 * maybe we are lucky and all data or parts are correct. We
@@ -1034,9 +1044,8 @@
 
 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
 	if (ret || ops.oobretlen != ops.ooblen) {
-		printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
-				" bytes, read %zd bytes, error %d\n",
-				jeb->offset, ops.ooblen, ops.oobretlen, ret);
+		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
+		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
 		if (!ret)
 			ret = -EIO;
 		return ret;
@@ -1048,8 +1057,8 @@
 			continue;
 
 		if (ops.oobbuf[i] != 0xFF) {
-			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
-				  "%08x\n", ops.oobbuf[i], i, jeb->offset));
+			jffs2_dbg(2, "Found %02x at %x in OOB for "
+				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
 			return 1;
 		}
 	}
@@ -1077,9 +1086,8 @@
 
 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
 	if (ret || ops.oobretlen != ops.ooblen) {
-		printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
-				" bytes, read %zd bytes, error %d\n",
-				jeb->offset, ops.ooblen, ops.oobretlen, ret);
+		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
+		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
 		if (!ret)
 			ret = -EIO;
 		return ret;
@@ -1103,9 +1111,8 @@
 
 	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
 	if (ret || ops.oobretlen != ops.ooblen) {
-		printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
-				" bytes, read %zd bytes, error %d\n",
-				jeb->offset, ops.ooblen, ops.oobretlen, ret);
+		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
+		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
 		if (!ret)
 			ret = -EIO;
 		return ret;
@@ -1130,11 +1137,12 @@
 	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
 		return 0;
 
-	printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset);
+	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
 	ret = mtd_block_markbad(c->mtd, bad_offset);
 
 	if (ret) {
-		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
+			  __func__, jeb->offset, ret);
 		return ret;
 	}
 	return 1;
@@ -1151,11 +1159,11 @@
 	c->cleanmarker_size = 0;
 
 	if (!oinfo || oinfo->oobavail == 0) {
-		printk(KERN_ERR "inconsistent device description\n");
+		pr_err("inconsistent device description\n");
 		return -EINVAL;
 	}
 
-	D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));
+	jffs2_dbg(1, "using OOB on NAND\n");
 
 	c->oobavail = oinfo->oobavail;
 
@@ -1222,7 +1230,7 @@
 
 	if ((c->flash_size % c->sector_size) != 0) {
 		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
-		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
+		pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
 	};
 
 	c->wbuf_ofs = 0xFFFFFFFF;
@@ -1239,7 +1247,8 @@
 	}
 #endif
 
-	printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
+	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
+		c->wbuf_pagesize, c->sector_size);
 
 	return 0;
 }
@@ -1297,7 +1306,8 @@
 	if (!c->wbuf)
 		return -ENOMEM;
 
-	printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
+	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
+		c->wbuf_pagesize, c->sector_size);
 
 	return 0;
 }
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 30d175b..b634de4c 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/crc32.h>
@@ -36,7 +38,7 @@
 	f->inocache->state = INO_STATE_PRESENT;
 
 	jffs2_add_ino_cache(c, f->inocache);
-	D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
+	jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino);
 	ri->ino = cpu_to_je32(f->inocache->ino);
 
 	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -68,7 +70,7 @@
 	unsigned long cnt = 2;
 
 	D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
-		printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n");
+		pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n");
 		BUG();
 	}
 	   );
@@ -78,7 +80,9 @@
 	vecs[1].iov_len = datalen;
 
 	if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
-		printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
+		pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n",
+			__func__, je32_to_cpu(ri->totlen),
+			sizeof(*ri), datalen);
 	}
 
 	fn = jffs2_alloc_full_dnode();
@@ -95,9 +99,9 @@
 
 	if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
 		BUG_ON(!retried);
-		D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, "
-				"highest version %d -> updating dnode\n",
-				je32_to_cpu(ri->version), f->highest_version));
+		jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n",
+			  __func__,
+			  je32_to_cpu(ri->version), f->highest_version);
 		ri->version = cpu_to_je32(++f->highest_version);
 		ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
 	}
@@ -106,8 +110,8 @@
 				 (alloc_mode==ALLOC_GC)?0:f->inocache->ino);
 
 	if (ret || (retlen != sizeof(*ri) + datalen)) {
-		printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
-		       sizeof(*ri)+datalen, flash_ofs, ret, retlen);
+		pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+			  sizeof(*ri) + datalen, flash_ofs, ret, retlen);
 
 		/* Mark the space as dirtied */
 		if (retlen) {
@@ -118,7 +122,8 @@
 			   this node */
 			jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL);
 		} else {
-			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
+			pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
+				  flash_ofs);
 		}
 		if (!retried && alloc_mode != ALLOC_NORETRY) {
 			/* Try to reallocate space and retry */
@@ -127,7 +132,7 @@
 
 			retried = 1;
 
-			D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+			jffs2_dbg(1, "Retrying failed write.\n");
 
 			jffs2_dbg_acct_sanity_check(c,jeb);
 			jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -147,14 +152,16 @@
 
 			if (!ret) {
 				flash_ofs = write_ofs(c);
-				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+				jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
+					  flash_ofs);
 
 				jffs2_dbg_acct_sanity_check(c,jeb);
 				jffs2_dbg_acct_paranoia_check(c, jeb);
 
 				goto retry;
 			}
-			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
+				  ret);
 		}
 		/* Release the full_dnode which is now useless, and return */
 		jffs2_free_full_dnode(fn);
@@ -183,10 +190,10 @@
 	fn->size = je32_to_cpu(ri->dsize);
 	fn->frags = 0;
 
-	D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
+	jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
 		  flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize),
 		  je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
-		  je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
+		  je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen));
 
 	if (retried) {
 		jffs2_dbg_acct_sanity_check(c,NULL);
@@ -206,22 +213,23 @@
 	int retried = 0;
 	int ret;
 
-	D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
+	jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
+		  __func__,
 		  je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
-		  je32_to_cpu(rd->name_crc)));
+		  je32_to_cpu(rd->name_crc));
 
 	D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
-		printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
+		pr_crit("Eep. CRC not correct in jffs2_write_dirent()\n");
 		BUG();
 	   });
 
 	if (strnlen(name, namelen) != namelen) {
 		/* This should never happen, but seems to have done on at least one
 		   occasion: https://dev.laptop.org/ticket/4184 */
-		printk(KERN_CRIT "Error in jffs2_write_dirent() -- name contains zero bytes!\n");
-		printk(KERN_CRIT "Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n",
-		       je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
-		       je32_to_cpu(rd->name_crc));
+		pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n");
+		pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n",
+			je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
+			je32_to_cpu(rd->name_crc));
 		WARN_ON(1);
 		return ERR_PTR(-EIO);
 	}
@@ -249,9 +257,9 @@
 
 	if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
 		BUG_ON(!retried);
-		D1(printk(KERN_DEBUG "jffs2_write_dirent : dirent_version %d, "
-				     "highest version %d -> updating dirent\n",
-				     je32_to_cpu(rd->version), f->highest_version));
+		jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n",
+			  __func__,
+			  je32_to_cpu(rd->version), f->highest_version);
 		rd->version = cpu_to_je32(++f->highest_version);
 		fd->version = je32_to_cpu(rd->version);
 		rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
@@ -260,13 +268,14 @@
 	ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
 				 (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
 	if (ret || (retlen != sizeof(*rd) + namelen)) {
-		printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
-			       sizeof(*rd)+namelen, flash_ofs, ret, retlen);
+		pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+			  sizeof(*rd) + namelen, flash_ofs, ret, retlen);
 		/* Mark the space as dirtied */
 		if (retlen) {
 			jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL);
 		} else {
-			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
+			pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
+				  flash_ofs);
 		}
 		if (!retried) {
 			/* Try to reallocate space and retry */
@@ -275,7 +284,7 @@
 
 			retried = 1;
 
-			D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+			jffs2_dbg(1, "Retrying failed write.\n");
 
 			jffs2_dbg_acct_sanity_check(c,jeb);
 			jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -295,12 +304,14 @@
 
 			if (!ret) {
 				flash_ofs = write_ofs(c);
-				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+				jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n",
+					  flash_ofs);
 				jffs2_dbg_acct_sanity_check(c,jeb);
 				jffs2_dbg_acct_paranoia_check(c, jeb);
 				goto retry;
 			}
-			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
+				  ret);
 		}
 		/* Release the full_dnode which is now useless, and return */
 		jffs2_free_full_dirent(fd);
@@ -333,8 +344,8 @@
 	int ret = 0;
 	uint32_t writtenlen = 0;
 
-       	D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n",
-		  f->inocache->ino, offset, writelen));
+	jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n",
+		  __func__, f->inocache->ino, offset, writelen);
 
 	while(writelen) {
 		struct jffs2_full_dnode *fn;
@@ -345,12 +356,13 @@
 		int retried = 0;
 
 	retry:
-		D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));
+		jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n",
+			  writelen, offset);
 
 		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN,
 					&alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 		if (ret) {
-			D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
+			jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret);
 			break;
 		}
 		mutex_lock(&f->sem);
@@ -386,7 +398,7 @@
 			if (!retried) {
 				/* Write error to be retried */
 				retried = 1;
-				D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n"));
+				jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n");
 				goto retry;
 			}
 			break;
@@ -399,7 +411,8 @@
 		}
 		if (ret) {
 			/* Eep */
-			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
+			jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n",
+				  ret);
 			jffs2_mark_node_obsolete(c, fn->raw);
 			jffs2_free_full_dnode(fn);
 
@@ -410,11 +423,11 @@
 		mutex_unlock(&f->sem);
 		jffs2_complete_reservation(c);
 		if (!datalen) {
-			printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
+			pr_warn("Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
 			ret = -EIO;
 			break;
 		}
-		D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
+		jffs2_dbg(1, "increasing writtenlen by %d\n", datalen);
 		writtenlen += datalen;
 		offset += datalen;
 		writelen -= datalen;
@@ -439,7 +452,7 @@
 	 */
 	ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
 				JFFS2_SUMMARY_INODE_SIZE);
-	D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
+	jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen);
 	if (ret)
 		return ret;
 
@@ -450,11 +463,11 @@
 
 	fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
 
-	D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
-		  jemode_to_cpu(ri->mode)));
+	jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n",
+		  jemode_to_cpu(ri->mode));
 
 	if (IS_ERR(fn)) {
-		D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
+		jffs2_dbg(1, "jffs2_write_dnode() failed\n");
 		/* Eeek. Wave bye bye */
 		mutex_unlock(&f->sem);
 		jffs2_complete_reservation(c);
@@ -480,7 +493,7 @@
 
 	if (ret) {
 		/* Eep. */
-		D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
+		jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n");
 		return ret;
 	}
 
@@ -597,8 +610,8 @@
 			    !memcmp(fd->name, name, namelen) &&
 			    !fd->name[namelen]) {
 
-				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
-					  fd->ino, ref_offset(fd->raw)));
+				jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n",
+					  fd->ino, ref_offset(fd->raw));
 				jffs2_mark_node_obsolete(c, fd->raw);
 				/* We don't want to remove it from the list immediately,
 				   because that screws up getdents()/seek() semantics even
@@ -627,11 +640,13 @@
 				dead_f->dents = fd->next;
 
 				if (fd->ino) {
-					printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
-					       dead_f->inocache->ino, fd->name, fd->ino);
+					pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
+						dead_f->inocache->ino,
+						fd->name, fd->ino);
 				} else {
-					D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n",
-						fd->name, dead_f->inocache->ino));
+					jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n",
+						  fd->name,
+						  dead_f->inocache->ino);
 				}
 				if (fd->raw)
 					jffs2_mark_node_obsolete(c, fd->raw);
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 3e93cdd..b55b803 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
diff --git a/fs/libfs.c b/fs/libfs.c
index 4a0d1f0..18d08f5 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -264,6 +264,13 @@
 	return ERR_PTR(-ENOMEM);
 }
 
+int simple_open(struct inode *inode, struct file *file)
+{
+	if (inode->i_private)
+		file->private_data = inode->i_private;
+	return 0;
+}
+
 int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode = old_dentry->d_inode;
@@ -522,6 +529,7 @@
 	return 0;
 out:
 	d_genocide(root);
+	shrink_dcache_parent(root);
 	dput(root);
 	return -ENOMEM;
 }
@@ -984,6 +992,7 @@
 EXPORT_SYMBOL(simple_empty);
 EXPORT_SYMBOL(simple_fill_super);
 EXPORT_SYMBOL(simple_getattr);
+EXPORT_SYMBOL(simple_open);
 EXPORT_SYMBOL(simple_link);
 EXPORT_SYMBOL(simple_lookup);
 EXPORT_SYMBOL(simple_pin_fs);
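
The new simple_open() helper above only copies inode->i_private into file->private_data, so drivers that open-coded that one-liner can drop their private open handler. A minimal sketch of how a caller might wire it up (the names here are illustrative and not part of this patch; the pstore conversion further down in this series is the real in-tree user):

	#include <linux/fs.h>
	#include <linux/module.h>

	/* Assumed driver-provided read handler, shown only so the table is complete. */
	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		/* a real driver would read from file->private_data here */
		return 0;
	}

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.open	= simple_open,	/* file->private_data = inode->i_private */
		.read	= example_read,
		.llseek	= default_llseek,
	};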
diff --git a/fs/locks.c b/fs/locks.c
index 637694b..0d68f1f 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -510,12 +510,13 @@
 
 /*
  */
-static void locks_delete_block(struct file_lock *waiter)
+void locks_delete_block(struct file_lock *waiter)
 {
 	lock_flocks();
 	__locks_delete_block(waiter);
 	unlock_flocks();
 }
+EXPORT_SYMBOL(locks_delete_block);
 
 /* Insert waiter into blocker's block list.
  * We use a circular list so that processes can be easily woken up in
diff --git a/fs/namei.c b/fs/namei.c
index e615ff3..0062dd1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1054,27 +1054,65 @@
 }
 
 /*
- * Allocate a dentry with name and parent, and perform a parent
- * directory ->lookup on it. Returns the new dentry, or ERR_PTR
- * on error. parent->d_inode->i_mutex must be held. d_lookup must
- * have verified that no child exists while under i_mutex.
+ * This looks up the name in dcache, possibly revalidates the old dentry and
+ * allocates a new one if not found or not valid.  The need_lookup argument
+ * returns whether i_op->lookup is necessary.
+ *
+ * dir->d_inode->i_mutex must be held
  */
-static struct dentry *d_alloc_and_lookup(struct dentry *parent,
-				struct qstr *name, struct nameidata *nd)
+static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
+				    struct nameidata *nd, bool *need_lookup)
 {
-	struct inode *inode = parent->d_inode;
 	struct dentry *dentry;
+	int error;
+
+	*need_lookup = false;
+	dentry = d_lookup(dir, name);
+	if (dentry) {
+		if (d_need_lookup(dentry)) {
+			*need_lookup = true;
+		} else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
+			error = d_revalidate(dentry, nd);
+			if (unlikely(error <= 0)) {
+				if (error < 0) {
+					dput(dentry);
+					return ERR_PTR(error);
+				} else if (!d_invalidate(dentry)) {
+					dput(dentry);
+					dentry = NULL;
+				}
+			}
+		}
+	}
+
+	if (!dentry) {
+		dentry = d_alloc(dir, name);
+		if (unlikely(!dentry))
+			return ERR_PTR(-ENOMEM);
+
+		*need_lookup = true;
+	}
+	return dentry;
+}
+
+/*
+ * Call i_op->lookup on the dentry.  The dentry must be negative but may be
+ * hashed if it was populated with DCACHE_NEED_LOOKUP.
+ *
+ * dir->d_inode->i_mutex must be held
+ */
+static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
+				  struct nameidata *nd)
+{
 	struct dentry *old;
 
 	/* Don't create child dentry for a dead directory. */
-	if (unlikely(IS_DEADDIR(inode)))
+	if (unlikely(IS_DEADDIR(dir))) {
+		dput(dentry);
 		return ERR_PTR(-ENOENT);
+	}
 
-	dentry = d_alloc(parent, name);
-	if (unlikely(!dentry))
-		return ERR_PTR(-ENOMEM);
-
-	old = inode->i_op->lookup(inode, dentry, nd);
+	old = dir->i_op->lookup(dir, dentry, nd);
 	if (unlikely(old)) {
 		dput(dentry);
 		dentry = old;
@@ -1082,30 +1120,17 @@
 	return dentry;
 }
 
-/*
- * We already have a dentry, but require a lookup to be performed on the parent
- * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error.
- * parent->d_inode->i_mutex must be held. d_lookup must have verified that no
- * child exists while under i_mutex.
- */
-static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry,
-				     struct nameidata *nd)
+static struct dentry *__lookup_hash(struct qstr *name,
+		struct dentry *base, struct nameidata *nd)
 {
-	struct inode *inode = parent->d_inode;
-	struct dentry *old;
+	bool need_lookup;
+	struct dentry *dentry;
 
-	/* Don't create child dentry for a dead directory. */
-	if (unlikely(IS_DEADDIR(inode))) {
-		dput(dentry);
-		return ERR_PTR(-ENOENT);
-	}
+	dentry = lookup_dcache(name, base, nd, &need_lookup);
+	if (!need_lookup)
+		return dentry;
 
-	old = inode->i_op->lookup(inode, dentry, nd);
-	if (unlikely(old)) {
-		dput(dentry);
-		dentry = old;
-	}
-	return dentry;
+	return lookup_real(base->d_inode, dentry, nd);
 }
 
 /*
@@ -1139,6 +1164,8 @@
 			return -ECHILD;
 		nd->seq = seq;
 
+		if (unlikely(d_need_lookup(dentry)))
+			goto unlazy;
 		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
 			status = d_revalidate(dentry, nd);
 			if (unlikely(status <= 0)) {
@@ -1147,8 +1174,6 @@
 				goto unlazy;
 			}
 		}
-		if (unlikely(d_need_lookup(dentry)))
-			goto unlazy;
 		path->mnt = mnt;
 		path->dentry = dentry;
 		if (unlikely(!__follow_mount_rcu(nd, path, inode)))
@@ -1163,38 +1188,14 @@
 		dentry = __d_lookup(parent, name);
 	}
 
-	if (dentry && unlikely(d_need_lookup(dentry))) {
-		dput(dentry);
-		dentry = NULL;
-	}
-retry:
-	if (unlikely(!dentry)) {
-		struct inode *dir = parent->d_inode;
-		BUG_ON(nd->inode != dir);
+	if (unlikely(!dentry))
+		goto need_lookup;
 
-		mutex_lock(&dir->i_mutex);
-		dentry = d_lookup(parent, name);
-		if (likely(!dentry)) {
-			dentry = d_alloc_and_lookup(parent, name, nd);
-			if (IS_ERR(dentry)) {
-				mutex_unlock(&dir->i_mutex);
-				return PTR_ERR(dentry);
-			}
-			/* known good */
-			need_reval = 0;
-			status = 1;
-		} else if (unlikely(d_need_lookup(dentry))) {
-			dentry = d_inode_lookup(parent, dentry, nd);
-			if (IS_ERR(dentry)) {
-				mutex_unlock(&dir->i_mutex);
-				return PTR_ERR(dentry);
-			}
-			/* known good */
-			need_reval = 0;
-			status = 1;
-		}
-		mutex_unlock(&dir->i_mutex);
+	if (unlikely(d_need_lookup(dentry))) {
+		dput(dentry);
+		goto need_lookup;
 	}
+
 	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
 		status = d_revalidate(dentry, nd);
 	if (unlikely(status <= 0)) {
@@ -1204,12 +1205,10 @@
 		}
 		if (!d_invalidate(dentry)) {
 			dput(dentry);
-			dentry = NULL;
-			need_reval = 1;
-			goto retry;
+			goto need_lookup;
 		}
 	}
-
+done:
 	path->mnt = mnt;
 	path->dentry = dentry;
 	err = follow_managed(path, nd->flags);
@@ -1221,6 +1220,16 @@
 		nd->flags |= LOOKUP_JUMPED;
 	*inode = path->dentry->d_inode;
 	return 0;
+
+need_lookup:
+	BUG_ON(nd->inode != parent->d_inode);
+
+	mutex_lock(&parent->d_inode->i_mutex);
+	dentry = __lookup_hash(name, parent, nd);
+	mutex_unlock(&parent->d_inode->i_mutex);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+	goto done;
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1398,18 +1407,9 @@
  */
 #ifdef CONFIG_DCACHE_WORD_ACCESS
 
-#ifdef CONFIG_64BIT
+#include <asm/word-at-a-time.h>
 
-/*
- * Jan Achrenius on G+: microoptimized version of
- * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
- * that works for the bytemasks without having to
- * mask them first.
- */
-static inline long count_masked_bytes(unsigned long mask)
-{
-	return mask*0x0001020304050608ul >> 56;
-}
+#ifdef CONFIG_64BIT
 
 static inline unsigned int fold_hash(unsigned long hash)
 {
@@ -1419,15 +1419,6 @@
 
 #else	/* 32-bit case */
 
-/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
-static inline long count_masked_bytes(long mask)
-{
-	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
-	long a = (0x0ff0001+mask) >> 23;
-	/* Fix the 1 for 00 case */
-	return a & mask;
-}
-
 #define fold_hash(x) (x)
 
 #endif
@@ -1455,17 +1446,6 @@
 }
 EXPORT_SYMBOL(full_name_hash);
 
-#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
-#define ONEBYTES	REPEAT_BYTE(0x01)
-#define SLASHBYTES	REPEAT_BYTE('/')
-#define HIGHBITS	REPEAT_BYTE(0x80)
-
-/* Return the high bit set in the first byte that is a zero */
-static inline unsigned long has_zero(unsigned long a)
-{
-	return ((a - ONEBYTES) & ~a) & HIGHBITS;
-}
-
 /*
  * Calculate the length and hash of the path component, and
  * return the length of the component;
@@ -1481,7 +1461,7 @@
 		len += sizeof(unsigned long);
 		a = *(unsigned long *)(name+len);
 		/* Do we have any NUL or '/' bytes in this word? */
-		mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
+		mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
 	} while (!mask);
 
 	/* The mask *below* the first high bit set */
@@ -1846,59 +1826,6 @@
 	return err;
 }
 
-static struct dentry *__lookup_hash(struct qstr *name,
-		struct dentry *base, struct nameidata *nd)
-{
-	struct inode *inode = base->d_inode;
-	struct dentry *dentry;
-	int err;
-
-	err = inode_permission(inode, MAY_EXEC);
-	if (err)
-		return ERR_PTR(err);
-
-	/*
-	 * Don't bother with __d_lookup: callers are for creat as
-	 * well as unlink, so a lot of the time it would cost
-	 * a double lookup.
-	 */
-	dentry = d_lookup(base, name);
-
-	if (dentry && d_need_lookup(dentry)) {
-		/*
-		 * __lookup_hash is called with the parent dir's i_mutex already
-		 * held, so we are good to go here.
-		 */
-		dentry = d_inode_lookup(base, dentry, nd);
-		if (IS_ERR(dentry))
-			return dentry;
-	}
-
-	if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) {
-		int status = d_revalidate(dentry, nd);
-		if (unlikely(status <= 0)) {
-			/*
-			 * The dentry failed validation.
-			 * If d_revalidate returned 0 attempt to invalidate
-			 * the dentry otherwise d_revalidate is asking us
-			 * to return a fail status.
-			 */
-			if (status < 0) {
-				dput(dentry);
-				return ERR_PTR(status);
-			} else if (!d_invalidate(dentry)) {
-				dput(dentry);
-				dentry = NULL;
-			}
-		}
-	}
-
-	if (!dentry)
-		dentry = d_alloc_and_lookup(base, name, nd);
-
-	return dentry;
-}
-
 /*
  * Restricted form of lookup. Doesn't follow links, single-component only,
  * needs parent already locked. Doesn't follow mounts.
@@ -1924,6 +1851,7 @@
 {
 	struct qstr this;
 	unsigned int c;
+	int err;
 
 	WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
 
@@ -1948,6 +1876,10 @@
 			return ERR_PTR(err);
 	}
 
+	err = inode_permission(base->d_inode, MAY_EXEC);
+	if (err)
+		return ERR_PTR(err);
+
 	return __lookup_hash(&this, base, NULL);
 }
 
@@ -2749,7 +2681,7 @@
 
 /*
  * The dentry_unhash() helper will try to drop the dentry early: we
- * should have a usage count of 2 if we're the only user of this
+ * should have a usage count of 1 if we're the only user of this
  * dentry, and if that is true (possibly after pruning the dcache),
  * then we drop the dentry now.
  *
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index a6fda3c..a1a1bfd 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -28,8 +28,6 @@
 #include "suballoc.h"
 #include "move_extents.h"
 
-#include <linux/ext2_fs.h>
-
 #define o2info_from_user(a, b)	\
 		copy_from_user(&(a), (b), sizeof(a))
 #define o2info_to_user(a, b)	\
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 46a15d8..eed44bf 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -115,12 +115,13 @@
 	if (IS_ERR(sb))
 		return ERR_CAST(sb);
 
+	if (!proc_parse_options(options, ns)) {
+		deactivate_locked_super(sb);
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!sb->s_root) {
 		sb->s_flags = flags;
-		if (!proc_parse_options(options, ns)) {
-			deactivate_locked_super(sb);
-			return ERR_PTR(-EINVAL);
-		}
 		err = proc_fill_super(sb);
 		if (err) {
 			deactivate_locked_super(sb);
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index f37c32b..1950788 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -52,12 +52,6 @@
 	char	data[];
 };
 
-static int pstore_file_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t pstore_file_read(struct file *file, char __user *userbuf,
 						size_t count, loff_t *ppos)
 {
@@ -67,7 +61,7 @@
 }
 
 static const struct file_operations pstore_file_operations = {
-	.open	= pstore_file_open,
+	.open	= simple_open,
 	.read	= pstore_file_read,
 	.llseek	= default_llseek,
 };
@@ -105,26 +99,12 @@
 	.unlink		= pstore_unlink,
 };
 
-static struct inode *pstore_get_inode(struct super_block *sb,
-					const struct inode *dir, int mode, dev_t dev)
+static struct inode *pstore_get_inode(struct super_block *sb)
 {
 	struct inode *inode = new_inode(sb);
-
 	if (inode) {
 		inode->i_ino = get_next_ino();
-		inode->i_uid = inode->i_gid = 0;
-		inode->i_mode = mode;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-		switch (mode & S_IFMT) {
-		case S_IFREG:
-			inode->i_fop = &pstore_file_operations;
-			break;
-		case S_IFDIR:
-			inode->i_op = &pstore_dir_inode_operations;
-			inode->i_fop = &simple_dir_operations;
-			inc_nlink(inode);
-			break;
-		}
 	}
 	return inode;
 }
@@ -216,9 +196,11 @@
 		return rc;
 
 	rc = -ENOMEM;
-	inode = pstore_get_inode(pstore_sb, root->d_inode, S_IFREG | 0444, 0);
+	inode = pstore_get_inode(pstore_sb);
 	if (!inode)
 		goto fail;
+	inode->i_mode = S_IFREG | 0444;
+	inode->i_fop = &pstore_file_operations;
 	private = kmalloc(sizeof *private + size, GFP_KERNEL);
 	if (!private)
 		goto fail_alloc;
@@ -293,10 +275,12 @@
 
 	parse_options(data);
 
-	inode = pstore_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+	inode = pstore_get_inode(sb);
 	if (inode) {
-		/* override ramfs "dir" options so we catch unlink(2) */
+		inode->i_mode = S_IFDIR | 0755;
 		inode->i_op = &pstore_dir_inode_operations;
+		inode->i_fop = &simple_dir_operations;
+		inc_nlink(inode);
 	}
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root)
diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
index 71e2b4d..f86f51f 100644
--- a/fs/romfs/storage.c
+++ b/fs/romfs/storage.c
@@ -19,7 +19,7 @@
 #endif
 
 #ifdef CONFIG_ROMFS_ON_MTD
-#define ROMFS_MTD_READ(sb, ...) ((sb)->s_mtd->read((sb)->s_mtd, ##__VA_ARGS__))
+#define ROMFS_MTD_READ(sb, ...) mtd_read((sb)->s_mtd, ##__VA_ARGS__)
 
 /*
  * read data from an romfs image on an MTD device
diff --git a/fs/splice.c b/fs/splice.c
index 5f883de..f847684 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -30,6 +30,7 @@
 #include <linux/uio.h>
 #include <linux/security.h>
 #include <linux/gfp.h>
+#include <linux/socket.h>
 
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -690,7 +691,9 @@
 	if (!likely(file->f_op && file->f_op->sendpage))
 		return -EINVAL;
 
-	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
+	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+	if (sd->len < sd->total_len)
+		more |= MSG_SENDPAGE_NOTLAST;
 	return file->f_op->sendpage(file, buf->page, buf->offset,
 				    sd->len, &pos, more);
 }
diff --git a/fs/xattr.c b/fs/xattr.c
index d6dfd24..3c8c1cc 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -19,8 +19,9 @@
 #include <linux/export.h>
 #include <linux/fsnotify.h>
 #include <linux/audit.h>
-#include <asm/uaccess.h>
+#include <linux/vmalloc.h>
 
+#include <asm/uaccess.h>
 
 /*
  * Check permissions for extended attribute access.  This is a bit complicated
@@ -320,6 +321,7 @@
 {
 	int error;
 	void *kvalue = NULL;
+	void *vvalue = NULL;	/* If non-NULL, we used vmalloc() */
 	char kname[XATTR_NAME_MAX + 1];
 
 	if (flags & ~(XATTR_CREATE|XATTR_REPLACE))
@@ -334,13 +336,25 @@
 	if (size) {
 		if (size > XATTR_SIZE_MAX)
 			return -E2BIG;
-		kvalue = memdup_user(value, size);
-		if (IS_ERR(kvalue))
-			return PTR_ERR(kvalue);
+		kvalue = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+		if (!kvalue) {
+			vvalue = vmalloc(size);
+			if (!vvalue)
+				return -ENOMEM;
+			kvalue = vvalue;
+		}
+		if (copy_from_user(kvalue, value, size)) {
+			error = -EFAULT;
+			goto out;
+		}
 	}
 
 	error = vfs_setxattr(d, kname, kvalue, size, flags);
-	kfree(kvalue);
+out:
+	if (vvalue)
+		vfree(vvalue);
+	else
+		kfree(kvalue);
 	return error;
 }
 
@@ -492,13 +506,18 @@
 {
 	ssize_t error;
 	char *klist = NULL;
+	char *vlist = NULL;	/* If non-NULL, we used vmalloc() */
 
 	if (size) {
 		if (size > XATTR_LIST_MAX)
 			size = XATTR_LIST_MAX;
-		klist = kmalloc(size, GFP_KERNEL);
-		if (!klist)
-			return -ENOMEM;
+		klist = kmalloc(size, __GFP_NOWARN | GFP_KERNEL);
+		if (!klist) {
+			vlist = vmalloc(size);
+			if (!vlist)
+				return -ENOMEM;
+			klist = vlist;
+		}
 	}
 
 	error = vfs_listxattr(d, klist, size);
@@ -510,7 +529,10 @@
 		   than XATTR_LIST_MAX bytes. Not possible. */
 		error = -E2BIG;
 	}
-	kfree(klist);
+	if (vlist)
+		vfree(vlist);
+	else
+		kfree(klist);
 	return error;
 }
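
Both the setxattr() and listxattr() hunks above switch to the same try-kmalloc-then-vmalloc pattern for large user-supplied sizes. A sketch of that pattern factored into a pair of helpers (purely illustrative; the patch open-codes it, and later kernels grew kvmalloc()/kvfree() for exactly this job):

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Try a physically contiguous allocation first, without warning on
	 * failure; fall back to vmalloc() for sizes the page allocator will
	 * not satisfy. */
	static void *xattr_alloc(size_t size, bool *used_vmalloc)
	{
		void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

		if (p) {
			*used_vmalloc = false;
			return p;
		}
		*used_vmalloc = true;
		return vmalloc(size);
	}

	static void xattr_free(void *p, bool used_vmalloc)
	{
		if (used_vmalloc)
			vfree(p);
		else
			kfree(p);
	}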
 
diff --git a/drivers/acpi/acpica/acconfig.h b/include/acpi/acconfig.h
similarity index 92%
rename from drivers/acpi/acpica/acconfig.h
rename to include/acpi/acconfig.h
index 1f30af6..03f1485 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -85,6 +85,23 @@
  */
 #define ACPI_CHECKSUM_ABORT             FALSE
 
+/*
+ * Generate a version of ACPICA that only supports "reduced hardware"
+ * platforms (as defined in ACPI 5.0). Set to TRUE to generate a specialized
+ * version of ACPICA that ONLY supports the ACPI 5.0 "reduced hardware"
+ * model. In other words, no ACPI hardware is supported.
+ *
+ * If TRUE, this means no support for the following:
+ *      PM Event and Control registers
+ *      SCI interrupt (and handler)
+ *      Fixed Events
+ *      General Purpose Events (GPEs)
+ *      Global Lock
+ *      ACPI PM timer
+ *      FACS table (Waking vectors and Global Lock)
+ */
+#define ACPI_REDUCED_HARDWARE           FALSE
+
 /******************************************************************************
  *
  * Subsystem Constants
@@ -93,7 +110,7 @@
 
 /* Version of ACPI supported */
 
-#define ACPI_CA_SUPPORT_LEVEL           3
+#define ACPI_CA_SUPPORT_LEVEL           5
 
 /* Maximum count for a semaphore object */
 
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 5b6c391..92d6e1d 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -57,6 +57,7 @@
 #define ACPI_SUCCESS(a)                 (!(a))
 #define ACPI_FAILURE(a)                 (a)
 
+#define ACPI_SKIP(a)                    (a == AE_CTRL_SKIP)
 #define AE_OK                           (acpi_status) 0x0000
 
 /*
@@ -89,8 +90,9 @@
 #define AE_SAME_HANDLER                 (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
 #define AE_NO_HANDLER                   (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
 #define AE_OWNER_ID_LIMIT               (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
+#define AE_NOT_CONFIGURED               (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL)
 
-#define AE_CODE_ENV_MAX                 0x001B
+#define AE_CODE_ENV_MAX                 0x001C
 
 /*
  * Programmer exceptions
@@ -213,7 +215,8 @@
 	"AE_ABORT_METHOD",
 	"AE_SAME_HANDLER",
 	"AE_NO_HANDLER",
-	"AE_OWNER_ID_LIMIT"
+	"AE_OWNER_ID_LIMIT",
+	"AE_NOT_CONFIGURED"
 };
 
 char const *acpi_gbl_exception_names_pgm[] = {
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 5b5af0d..38f5088 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -46,6 +46,7 @@
 
 /* Method names - these methods can appear anywhere in the namespace */
 
+#define METHOD_NAME__SB_        "_SB_"
 #define METHOD_NAME__HID        "_HID"
 #define METHOD_NAME__CID        "_CID"
 #define METHOD_NAME__UID        "_UID"
@@ -64,11 +65,11 @@
 
 /* Method names - these methods must appear at the namespace root */
 
-#define METHOD_NAME__BFS        "\\_BFS"
-#define METHOD_NAME__GTS        "\\_GTS"
-#define METHOD_NAME__PTS        "\\_PTS"
-#define METHOD_NAME__SST        "\\_SI._SST"
-#define METHOD_NAME__WAK        "\\_WAK"
+#define METHOD_PATHNAME__BFS    "\\_BFS"
+#define METHOD_PATHNAME__GTS    "\\_GTS"
+#define METHOD_PATHNAME__PTS    "\\_PTS"
+#define METHOD_PATHNAME__SST    "\\_SI._SST"
+#define METHOD_PATHNAME__WAK    "\\_WAK"
 
 /* Definitions of the predefined namespace names  */
 
@@ -79,6 +80,5 @@
 #define ACPI_PREFIX_LOWER       (u32) 0x69706361	/* "acpi" */
 
 #define ACPI_NS_ROOT_PATH       "\\"
-#define ACPI_NS_SYSTEM_BUS      "_SB_"
 
 #endif				/* __ACNAMES_H__  */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 6cd5b64..f1c8ca6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -323,6 +323,8 @@
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
 bool acpi_bus_can_wakeup(acpi_handle handle);
+int acpi_power_resource_register_device(struct device *dev, acpi_handle handle);
+void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle);
 #ifdef CONFIG_ACPI_PROC_EVENT
 int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
 int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
@@ -392,8 +394,13 @@
 #endif
 
 #ifdef CONFIG_PM_SLEEP
+int acpi_pm_device_run_wake(struct device *, bool);
 int acpi_pm_device_sleep_wake(struct device *, bool);
 #else
+static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
+{
+	return -ENODEV;
+}
 static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
 {
 	return -ENODEV;
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 7c9aebe..21a5548 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -95,6 +95,11 @@
 acpi_os_table_override(struct acpi_table_header *existing_table,
 		       struct acpi_table_header **new_table);
 
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+				acpi_physical_address * new_address,
+				u32 *new_table_length);
+
 /*
  * Spinlock primitives
  */
@@ -217,14 +222,10 @@
  * Platform and hardware-independent physical memory interfaces
  */
 acpi_status
-acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width);
-acpi_status
-acpi_os_read_memory64(acpi_physical_address address, u64 *value, u32 width);
+acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
 
 acpi_status
-acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
-acpi_status
-acpi_os_write_memory64(acpi_physical_address address, u64 value, u32 width);
+acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width);
 
 /*
  * Platform and hardware-independent PCI configuration space access
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index a28da35..9821101 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,8 +47,9 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20120111
+#define ACPI_CA_VERSION                 0x20120320
 
+#include "acconfig.h"
 #include "actypes.h"
 #include "actbl.h"
 
@@ -71,6 +72,33 @@
 extern u8 acpi_gbl_truncate_io_addresses;
 extern u8 acpi_gbl_disable_auto_repair;
 
+/*
+ * Hardware-reduced prototypes. All interfaces that use these macros will
+ * be configured out of the ACPICA build if the ACPI_REDUCED_HARDWARE flag
+ * is set to TRUE.
+ */
+#if (!ACPI_REDUCED_HARDWARE)
+#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
+	prototype;
+
+#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
+	prototype;
+
+#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
+	prototype;
+
+#else
+#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
+	static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);}
+
+#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
+	static ACPI_INLINE prototype {return(AE_OK);}
+
+#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
+	static ACPI_INLINE prototype {}
+
+#endif				/* !ACPI_REDUCED_HARDWARE */
+
 extern u32 acpi_current_gpe_count;
 extern struct acpi_table_fadt acpi_gbl_FADT;
 extern u8 acpi_gbl_system_awake_and_running;
@@ -96,9 +124,8 @@
 acpi_status acpi_subsystem_status(void);
 #endif
 
-acpi_status acpi_enable(void);
-
-acpi_status acpi_disable(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);
@@ -235,17 +262,34 @@
 acpi_status
 acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
 
-acpi_status
-acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler,
-				 void *context);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_install_global_event_handler
+				(ACPI_GBL_EVENT_HANDLER handler, void *context))
 
-acpi_status
-acpi_install_fixed_event_handler(u32 acpi_event,
-				 acpi_event_handler handler, void *context);
-
-acpi_status
-acpi_remove_fixed_event_handler(u32 acpi_event, acpi_event_handler handler);
-
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_install_fixed_event_handler(u32
+								  acpi_event,
+								  acpi_event_handler
+								  handler,
+								  void
+								  *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_remove_fixed_event_handler(u32 acpi_event,
+								 acpi_event_handler
+								 handler))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_install_gpe_handler(acpi_handle
+							  gpe_device,
+							  u32 gpe_number,
+							  u32 type,
+							  acpi_gpe_handler
+							  address,
+							  void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_remove_gpe_handler(acpi_handle gpe_device,
+							 u32 gpe_number,
+							 acpi_gpe_handler
+							 address))
 acpi_status
 acpi_install_notify_handler(acpi_handle device,
 			    u32 handler_type,
@@ -266,15 +310,6 @@
 				  acpi_adr_space_type space_id,
 				  acpi_adr_space_handler handler);
 
-acpi_status
-acpi_install_gpe_handler(acpi_handle gpe_device,
-			 u32 gpe_number,
-			 u32 type, acpi_gpe_handler address, void *context);
-
-acpi_status
-acpi_remove_gpe_handler(acpi_handle gpe_device,
-			u32 gpe_number, acpi_gpe_handler address);
-
 #ifdef ACPI_FUTURE_USAGE
 acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
 #endif
@@ -284,9 +319,11 @@
 /*
  * Global Lock interfaces
  */
-acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);
-
-acpi_status acpi_release_global_lock(u32 handle);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_acquire_global_lock(u16 timeout,
+							 u32 *handle))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_release_global_lock(u32 handle))
 
 /*
  * Interfaces to AML mutex objects
@@ -299,47 +336,75 @@
 /*
  * Fixed Event interfaces
  */
-acpi_status acpi_enable_event(u32 event, u32 flags);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_enable_event(u32 event, u32 flags))
 
-acpi_status acpi_disable_event(u32 event, u32 flags);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_disable_event(u32 event, u32 flags))
 
-acpi_status acpi_clear_event(u32 event);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_clear_event(u32 event))
 
-acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
-
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_get_event_status(u32 event,
+						      acpi_event_status
+						      *event_status))
 /*
  * General Purpose Event (GPE) Interfaces
  */
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_update_all_gpes(void))
 
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_enable_gpe(acpi_handle gpe_device,
+						u32 gpe_number))
 
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_disable_gpe(acpi_handle gpe_device,
+						 u32 gpe_number))
 
-acpi_status
-acpi_setup_gpe_for_wake(acpi_handle parent_device,
-			acpi_handle gpe_device, u32 gpe_number);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_clear_gpe(acpi_handle gpe_device,
+					       u32 gpe_number))
 
-acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_set_gpe(acpi_handle gpe_device,
+					     u32 gpe_number, u8 action))
 
-acpi_status
-acpi_get_gpe_status(acpi_handle gpe_device,
-		    u32 gpe_number, acpi_event_status *event_status);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_finish_gpe(acpi_handle gpe_device,
+						u32 gpe_number))
 
-acpi_status acpi_disable_all_gpes(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_setup_gpe_for_wake(acpi_handle
+							parent_device,
+							acpi_handle gpe_device,
+							u32 gpe_number))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_set_gpe_wake_mask(acpi_handle gpe_device,
+							u32 gpe_number,
+							u8 action))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_get_gpe_status(acpi_handle gpe_device,
+						     u32 gpe_number,
+						     acpi_event_status
+						     *event_status))
 
-acpi_status acpi_enable_all_runtime_gpes(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
 
-acpi_status acpi_get_gpe_device(u32 gpe_index, acpi_handle *gpe_device);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
 
-acpi_status
-acpi_install_gpe_block(acpi_handle gpe_device,
-		       struct acpi_generic_address *gpe_block_address,
-		       u32 register_count, u32 interrupt_number);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_get_gpe_device(u32 gpe_index,
+						    acpi_handle * gpe_device))
 
-acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
-
-acpi_status acpi_update_all_gpes(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_install_gpe_block(acpi_handle gpe_device,
+						       struct
+						       acpi_generic_address
+						       *gpe_block_address,
+						       u32 register_count,
+						       u32 interrupt_number))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_remove_gpe_block(acpi_handle gpe_device))
 
 /*
  * Resource interfaces
@@ -391,34 +456,60 @@
  */
 acpi_status acpi_reset(void);
 
-acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_read_bit_register(u32 register_id,
+						       u32 *return_value))
 
-acpi_status acpi_write_bit_register(u32 register_id, u32 value);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_write_bit_register(u32 register_id,
+							u32 value))
 
-acpi_status acpi_set_firmware_waking_vector(u32 physical_address);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_set_firmware_waking_vector(u32
+								physical_address))
 
 #if ACPI_MACHINE_WIDTH == 64
-acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_set_firmware_waking_vector64(u64
+								  physical_address))
 #endif
 
 acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg);
 
 acpi_status acpi_write(u64 value, struct acpi_generic_address *reg);
 
+/*
+ * Sleep/Wake interfaces
+ */
 acpi_status
 acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
 
 acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
 
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);
 
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void);
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
 
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);
 
 acpi_status acpi_leave_sleep_state(u8 sleep_state);
 
 /*
+ * ACPI Timer interfaces
+ */
+#ifdef ACPI_FUTURE_USAGE
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_get_timer_resolution(u32 *resolution))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer(u32 *ticks))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_get_timer_duration(u32 start_ticks,
+							u32 end_ticks,
+							u32 *time_elapsed))
+#endif				/* ACPI_FUTURE_USAGE */
+
+/*
  * Error/Warning output
  */
 void ACPI_INTERNAL_VAR_XFACE
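
To make the effect of the ACPI_HW_DEPENDENT_RETURN_* wrapping above concrete, here is what one declaration expands to in each configuration (illustration only, using acpi_enable() as the example):

	/* With ACPI_REDUCED_HARDWARE == FALSE (the default), the macro leaves
	 * an ordinary prototype, so the real ACPICA implementation is used:
	 *
	 *	acpi_status acpi_enable(void);
	 *
	 * With ACPI_REDUCED_HARDWARE == TRUE, the same line becomes an inline
	 * stub, so callers still compile but get AE_NOT_CONFIGURED at run time:
	 *
	 *	static ACPI_INLINE acpi_status acpi_enable(void)
	 *	{
	 *		return (AE_NOT_CONFIGURED);
	 *	}
	 */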
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 8e1b92f..8dea546 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -309,6 +309,13 @@
 	PM_TABLET = 8
 };
 
+/* Values for sleep_status and sleep_control registers (V5 FADT) */
+
+#define ACPI_X_WAKE_STATUS          0x80
+#define ACPI_X_SLEEP_TYPE_MASK      0x1C
+#define ACPI_X_SLEEP_TYPE_POSITION  0x02
+#define ACPI_X_SLEEP_ENABLE         0x20
+
 /* Reset to default packing */
 
 #pragma pack()
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index d5dee7c..eba6604 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -518,6 +518,13 @@
 #define ACPI_SLEEP_TYPE_INVALID         0xFF
 
 /*
+ * Sleep/Wake flags
+ */
+#define ACPI_NO_OPTIONAL_METHODS        0x00	/* Do not execute any optional methods */
+#define ACPI_EXECUTE_GTS                0x01	/* For enter sleep interface */
+#define ACPI_EXECUTE_BFS                0x02	/* For leave sleep prep interface */
+
+/*
  * Standard notify values
  */
 #define ACPI_NOTIFY_BUS_CHECK           (u8) 0x00
@@ -532,8 +539,9 @@
 #define ACPI_NOTIFY_DEVICE_PLD_CHECK    (u8) 0x09
 #define ACPI_NOTIFY_RESERVED            (u8) 0x0A
 #define ACPI_NOTIFY_LOCALITY_UPDATE     (u8) 0x0B
+#define ACPI_NOTIFY_SHUTDOWN_REQUEST    (u8) 0x0C
 
-#define ACPI_NOTIFY_MAX                 0x0B
+#define ACPI_NOTIFY_MAX                 0x0C
 
 /*
  * Types associated with ACPI names and objects. The first group of
@@ -698,7 +706,8 @@
 #define ACPI_ALL_NOTIFY                 (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY)
 #define ACPI_MAX_NOTIFY_HANDLER_TYPE    0x3
 
-#define ACPI_MAX_SYS_NOTIFY             0x7f
+#define ACPI_MAX_SYS_NOTIFY             0x7F
+#define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF
 
 /* Address Space (Operation Region) Types */
 
@@ -786,6 +795,15 @@
 #define ACPI_ENABLE_EVENT                       1
 #define ACPI_DISABLE_EVENT                      0
 
+/* Sleep function dispatch */
+
+typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);
+
+struct acpi_sleep_functions {
+	ACPI_SLEEP_FUNCTION legacy_function;
+	ACPI_SLEEP_FUNCTION extended_function;
+};
+
 /*
  * External ACPI object definition
  */
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 8a36183..1488302 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -10,6 +10,7 @@
 #error "Cannot use generic cmpxchg on SMP"
 #endif
 
+#include <linux/types.h>
 #include <linux/irqflags.h>
 
 #ifndef xchg
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index d838c94..2eba340 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -31,6 +31,8 @@
 	crypto_set_spawn(&spawn->base, inst);
 }
 
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
+
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
 		     u32 type, u32 mask);
 
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 3a748a6..06e8b32 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -34,6 +34,8 @@
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
 			 u32 type, u32 mask);
 
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
+
 static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
 {
 	crypto_drop_spawn(&spawn->base);
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 34a7b89..64ff02d 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -617,6 +617,17 @@
 	__u64 value;
 };
 
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+	__u32 handle;
+
+	/** Flags.. only applicable for handle->fd */
+	__u32 flags;
+
+	/** Returned dmabuf file descriptor */
+	__s32 fd;
+};
+
 #include "drm_mode.h"
 
 #define DRM_IOCTL_BASE			'd'
@@ -673,7 +684,8 @@
 #define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
 #define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
 
-#define DRM_IOCTL_GEM_PRIME_OPEN        DRM_IOWR(0x2e, struct drm_gem_open)
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
 
 #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
 #define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 574bd1c..dd73104 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -91,6 +91,7 @@
 #define DRM_UT_CORE 		0x01
 #define DRM_UT_DRIVER		0x02
 #define DRM_UT_KMS		0x04
+#define DRM_UT_PRIME		0x08
 /*
  * Three debug levels are defined.
  * drm_core, drm_driver, drm_kms
@@ -150,6 +151,7 @@
 #define DRIVER_IRQ_VBL2    0x800
 #define DRIVER_GEM         0x1000
 #define DRIVER_MODESET     0x2000
+#define DRIVER_PRIME       0x4000
 
 #define DRIVER_BUS_PCI 0x1
 #define DRIVER_BUS_PLATFORM 0x2
@@ -215,6 +217,11 @@
 		drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, 		\
 					 __func__, fmt, ##args);	\
 	} while (0)
+#define DRM_DEBUG_PRIME(fmt, args...)					\
+	do {								\
+		drm_ut_debug_printk(DRM_UT_PRIME, DRM_NAME,		\
+					__func__, fmt, ##args);		\
+	} while (0)
 #define DRM_LOG(fmt, args...)						\
 	do {								\
 		drm_ut_debug_printk(DRM_UT_CORE, NULL,			\
@@ -238,6 +245,7 @@
 #else
 #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
 #define DRM_DEBUG_KMS(fmt, args...)	do { } while (0)
+#define DRM_DEBUG_PRIME(fmt, args...)	do { } while (0)
 #define DRM_DEBUG(fmt, arg...)		 do { } while (0)
 #define DRM_LOG(fmt, arg...)		do { } while (0)
 #define DRM_LOG_KMS(fmt, args...) do { } while (0)
@@ -410,6 +418,12 @@
 	void (*destroy)(struct drm_pending_event *event);
 };
 
+/* initial implementation using a linked list - todo hashtab */
+struct drm_prime_file_private {
+	struct list_head head;
+	struct mutex lock;
+};
+
 /** File private data */
 struct drm_file {
 	int authenticated;
@@ -437,6 +451,8 @@
 	wait_queue_head_t event_wait;
 	struct list_head event_list;
 	int event_space;
+
+	struct drm_prime_file_private prime;
 };
 
 /** Wait queue */
@@ -652,6 +668,12 @@
 	uint32_t pending_write_domain;
 
 	void *driver_private;
+
+	/* dma buf exported from this GEM object */
+	struct dma_buf *export_dma_buf;
+
+	/* dma buf attachment backing this object */
+	struct dma_buf_attachment *import_attach;
 };
 
 #include "drm_crtc.h"
@@ -890,6 +912,20 @@
 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
 
+	/* prime: */
+	/* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+	int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+				uint32_t handle, uint32_t flags, int *prime_fd);
+	/* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+	int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+				int prime_fd, uint32_t *handle);
+	/* export GEM -> dmabuf */
+	struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+				struct drm_gem_object *obj, int flags);
+	/* import dmabuf -> GEM */
+	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+				struct dma_buf *dma_buf);
+
 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
 
@@ -1509,6 +1545,32 @@
 extern int drm_clients_info(struct seq_file *m, void* data);
 extern int drm_gem_name_info(struct seq_file *m, void *data);
 
+
+extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+		int *prime_fd);
+extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+		struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+
+extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+
+int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
+int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
+			 struct drm_gem_object **obj);
+
 #if DRM_DEBUG_CODE
 extern int drm_vma_info(struct seq_file *m, void *data);
 #endif
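
The PRIME ioctls and struct drm_prime_handle added in include/drm/drm.h above are driven from userspace roughly as follows; this is a hypothetical sketch (example_handle_to_fd(), the variable names, and the use of libdrm's drmIoctl() wrapper are assumptions, not part of this patch):

	#include <fcntl.h>		/* O_CLOEXEC, used by DRM_CLOEXEC */
	#include <stdint.h>
	#include <drm.h>		/* assumed to be on the libdrm include path */
	#include <xf86drm.h>		/* assumed: libdrm's drmIoctl() wrapper */

	/* Export a GEM handle on an already-open DRM fd as a dma-buf file descriptor. */
	int example_handle_to_fd(int drm_fd, uint32_t handle)
	{
		struct drm_prime_handle args = {
			.handle = handle,
			.flags	= DRM_CLOEXEC,	/* flags only apply to handle->fd */
		};

		if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
			return -1;

		return args.fd;			/* dma-buf fd filled in by the kernel */
	}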
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 3963116..e478de4 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -85,7 +85,7 @@
 struct drm_exynos_vidi_connection {
 	unsigned int connection;
 	unsigned int extensions;
-	uint64_t *edid;
+	uint64_t edid;
 };
 
 struct drm_exynos_plane_set_zpos {
@@ -96,7 +96,8 @@
 /* memory type definitions. */
 enum e_drm_exynos_gem_mem_type {
 	/* Physically Non-Continuous memory. */
-	EXYNOS_BO_NONCONTIG	= 1 << 0
+	EXYNOS_BO_NONCONTIG	= 1 << 0,
+	EXYNOS_BO_MASK		= EXYNOS_BO_NONCONTIG
 };
 
 #define DRM_EXYNOS_GEM_CREATE		0x00
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 0a0001b..923afb5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -44,4 +44,8 @@
 /* flag for GFDT type */
 #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
+#ifdef CONFIG_INTEL_IOMMU
+extern int intel_iommu_gfx_mapped;
+#endif
+
 #endif
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a4b5da2..3c9b616 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -3,6 +3,7 @@
 header-y += caif/
 header-y += dvb/
 header-y += hdlc/
+header-y += hsi/
 header-y += isdn/
 header-y += mmc/
 header-y += nfsd/
@@ -120,7 +121,6 @@
 header-y += errqueue.h
 header-y += ethtool.h
 header-y += eventpoll.h
-header-y += ext2_fs.h
 header-y += fadvise.h
 header-y += falloc.h
 header-y += fanotify.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f53fea6..f421dd8 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -372,4 +372,14 @@
 
 #endif	/* !CONFIG_ACPI */
 
+#ifdef CONFIG_ACPI
+void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
+			       u32 pm1a_ctrl,  u32 pm1b_ctrl));
+
+acpi_status acpi_os_prepare_sleep(u8 sleep_state,
+				  u32 pm1a_control, u32 pm1b_control);
+#else
+#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
+#endif
+
 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 712abcc..6c26a3d 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/kobject.h>
 #include <linux/completion.h>
+#include <linux/hrtimer.h>
 
 #define CPUIDLE_STATE_MAX	8
 #define CPUIDLE_NAME_LEN	16
@@ -43,12 +44,15 @@
 
 	unsigned int	flags;
 	unsigned int	exit_latency; /* in US */
-	unsigned int	power_usage; /* in mW */
+	int		power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */
+	unsigned int    disable;
 
 	int (*enter)	(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index);
+
+	int (*enter_dead) (struct cpuidle_device *dev, int index);
 };
 
 /* Idle State Flags */
@@ -96,7 +100,6 @@
 	struct list_head 	device_list;
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
-	void			*governor_data;
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -118,10 +121,12 @@
  ****************************/
 
 struct cpuidle_driver {
-	char			name[CPUIDLE_NAME_LEN];
+	const char		*name;
 	struct module 		*owner;
 
 	unsigned int		power_specified:1;
+	/* set to 1 to use the core cpuidle time keeping (for all states). */
+	unsigned int		en_core_tk_irqen:1;
 	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
 	int			state_count;
 	int			safe_state_index;
@@ -140,6 +145,11 @@
 extern void cpuidle_resume_and_unlock(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
+extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int index,
+				int (*enter)(struct cpuidle_device *dev,
+					struct cpuidle_driver *drv, int index));
+extern int cpuidle_play_dead(void);
 
 #else
 static inline void disable_cpuidle(void) { }
@@ -157,6 +167,12 @@
 static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
+static inline int cpuidle_wrap_enter(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int index,
+				int (*enter)(struct cpuidle_device *dev,
+					struct cpuidle_driver *drv, int index))
+{ return -ENODEV; }
+static inline int cpuidle_play_dead(void) {return -ENODEV; }
 
 #endif
 
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1ffdb98..a2c819d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -764,12 +764,6 @@
  *
  */
 #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-/* These strip const, as traditionally they weren't const. */
-#define cpu_possible_map	(*(cpumask_t *)cpu_possible_mask)
-#define cpu_online_map		(*(cpumask_t *)cpu_online_mask)
-#define cpu_present_map		(*(cpumask_t *)cpu_present_mask)
-#define cpu_active_map		(*(cpumask_t *)cpu_active_mask)
-
 #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
diff --git a/include/linux/cryptouser.h b/include/linux/cryptouser.h
index 532fb58..4abf2ea 100644
--- a/include/linux/cryptouser.h
+++ b/include/linux/cryptouser.h
@@ -100,3 +100,6 @@
 	char type[CRYPTO_MAX_NAME];
 	unsigned int seedsize;
 };
+
+#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
+			       sizeof(struct crypto_report_blkcipher))
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 71ad34e..547ab56 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -13,6 +13,8 @@
 enum dma_attr {
 	DMA_ATTR_WRITE_BARRIER,
 	DMA_ATTR_WEAK_ORDERING,
+	DMA_ATTR_WRITE_COMBINE,
+	DMA_ATTR_NON_CONSISTENT,
 	DMA_ATTR_MAX,
 };
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 5a736af..dfc099e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -9,10 +9,15 @@
 #include <linux/scatterlist.h>
 
 struct dma_map_ops {
-	void* (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle);
+	void* (*alloc)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp,
+				struct dma_attrs *attrs);
+	void (*free)(struct device *dev, size_t size,
+			      void *vaddr, dma_addr_t dma_handle,
+			      struct dma_attrs *attrs);
+	int (*mmap)(struct device *, struct vm_area_struct *,
+			  void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+
 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
 			       unsigned long offset, size_t size,
 			       enum dma_data_direction dir,
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 676f967..f9a2e5e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -974,6 +974,7 @@
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e1d9e0e..f5647b5 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -896,8 +896,7 @@
  *
  * All operations are optional (i.e. the function pointer may be set
  * to %NULL) and callers must take this into account.  Callers must
- * hold the RTNL, except that for @get_drvinfo the caller may or may
- * not hold the RTNL.
+ * hold the RTNL lock.
  *
  * See the structures used by these operations for further documentation.
  *
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index ce1b719..2723e71 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -18,574 +18,25 @@
 
 #include <linux/types.h>
 #include <linux/magic.h>
-#include <linux/fs.h>
 
-/*
- * The second extended filesystem constants/structures
- */
-
-/*
- * Define EXT2FS_DEBUG to produce debug messages
- */
-#undef EXT2FS_DEBUG
-
-/*
- * Define EXT2_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT2_DEFAULT_RESERVE_BLOCKS     8
-/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
-#define EXT2_MAX_RESERVE_BLOCKS         1027
-#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
-/*
- * The second extended file system version
- */
-#define EXT2FS_DATE		"95/08/09"
-#define EXT2FS_VERSION		"0.5b"
-
-/*
- * Debug code
- */
-#ifdef EXT2FS_DEBUG
-#	define ext2_debug(f, a...)	{ \
-					printk ("EXT2-fs DEBUG (%s, %d): %s:", \
-						__FILE__, __LINE__, __func__); \
-				  	printk (f, ## a); \
-					}
-#else
-#	define ext2_debug(f, a...)	/**/
-#endif
-
-/*
- * Special inode numbers
- */
-#define	EXT2_BAD_INO		 1	/* Bad blocks inode */
-#define EXT2_ROOT_INO		 2	/* Root inode */
-#define EXT2_BOOT_LOADER_INO	 5	/* Boot loader inode */
-#define EXT2_UNDEL_DIR_INO	 6	/* Undelete directory inode */
-
-/* First non-reserved inode for old ext2 filesystems */
-#define EXT2_GOOD_OLD_FIRST_INO	11
-
-#ifdef __KERNEL__
-#include <linux/ext2_fs_sb.h>
-static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-#else
-/* Assume that user mode programs are passing in an ext2fs superblock, not
- * a kernel struct super_block.  This will allow us to call the feature-test
- * macros from user land. */
-#define EXT2_SB(sb)	(sb)
-#endif
+#define EXT2_NAME_LEN 255
 
 /*
  * Maximal count of links to a file
  */
 #define EXT2_LINK_MAX		32000
 
-/*
- * Macro-instructions used to manage several block sizes
- */
-#define EXT2_MIN_BLOCK_SIZE		1024
-#define	EXT2_MAX_BLOCK_SIZE		4096
-#define EXT2_MIN_BLOCK_LOG_SIZE		  10
-#ifdef __KERNEL__
-# define EXT2_BLOCK_SIZE(s)		((s)->s_blocksize)
-#else
-# define EXT2_BLOCK_SIZE(s)		(EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size)
-#endif
-#define	EXT2_ADDR_PER_BLOCK(s)		(EXT2_BLOCK_SIZE(s) / sizeof (__u32))
-#ifdef __KERNEL__
-# define EXT2_BLOCK_SIZE_BITS(s)	((s)->s_blocksize_bits)
-#else
-# define EXT2_BLOCK_SIZE_BITS(s)	((s)->s_log_block_size + 10)
-#endif
-#ifdef __KERNEL__
-#define	EXT2_ADDR_PER_BLOCK_BITS(s)	(EXT2_SB(s)->s_addr_per_block_bits)
-#define EXT2_INODE_SIZE(s)		(EXT2_SB(s)->s_inode_size)
-#define EXT2_FIRST_INO(s)		(EXT2_SB(s)->s_first_ino)
-#else
-#define EXT2_INODE_SIZE(s)	(((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
-				 EXT2_GOOD_OLD_INODE_SIZE : \
-				 (s)->s_inode_size)
-#define EXT2_FIRST_INO(s)	(((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
-				 EXT2_GOOD_OLD_FIRST_INO : \
-				 (s)->s_first_ino)
-#endif
+#define EXT2_SB_MAGIC_OFFSET	0x38
+#define EXT2_SB_BLOCKS_OFFSET	0x04
+#define EXT2_SB_BSIZE_OFFSET	0x18
 
-/*
- * Macro-instructions used to manage fragments
- */
-#define EXT2_MIN_FRAG_SIZE		1024
-#define	EXT2_MAX_FRAG_SIZE		4096
-#define EXT2_MIN_FRAG_LOG_SIZE		  10
-#ifdef __KERNEL__
-# define EXT2_FRAG_SIZE(s)		(EXT2_SB(s)->s_frag_size)
-# define EXT2_FRAGS_PER_BLOCK(s)	(EXT2_SB(s)->s_frags_per_block)
-#else
-# define EXT2_FRAG_SIZE(s)		(EXT2_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT2_FRAGS_PER_BLOCK(s)	(EXT2_BLOCK_SIZE(s) / EXT2_FRAG_SIZE(s))
-#endif
-
-/*
- * Structure of a blocks group descriptor
- */
-struct ext2_group_desc
+static inline u64 ext2_image_size(void *ext2_sb)
 {
-	__le32	bg_block_bitmap;		/* Blocks bitmap block */
-	__le32	bg_inode_bitmap;		/* Inodes bitmap block */
-	__le32	bg_inode_table;		/* Inodes table block */
-	__le16	bg_free_blocks_count;	/* Free blocks count */
-	__le16	bg_free_inodes_count;	/* Free inodes count */
-	__le16	bg_used_dirs_count;	/* Directories count */
-	__le16	bg_pad;
-	__le32	bg_reserved[3];
-};
-
-/*
- * Macro-instructions used to manage group descriptors
- */
-#ifdef __KERNEL__
-# define EXT2_BLOCKS_PER_GROUP(s)	(EXT2_SB(s)->s_blocks_per_group)
-# define EXT2_DESC_PER_BLOCK(s)		(EXT2_SB(s)->s_desc_per_block)
-# define EXT2_INODES_PER_GROUP(s)	(EXT2_SB(s)->s_inodes_per_group)
-# define EXT2_DESC_PER_BLOCK_BITS(s)	(EXT2_SB(s)->s_desc_per_block_bits)
-#else
-# define EXT2_BLOCKS_PER_GROUP(s)	((s)->s_blocks_per_group)
-# define EXT2_DESC_PER_BLOCK(s)		(EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_group_desc))
-# define EXT2_INODES_PER_GROUP(s)	((s)->s_inodes_per_group)
-#endif
-
-/*
- * Constants relative to the data blocks
- */
-#define	EXT2_NDIR_BLOCKS		12
-#define	EXT2_IND_BLOCK			EXT2_NDIR_BLOCKS
-#define	EXT2_DIND_BLOCK			(EXT2_IND_BLOCK + 1)
-#define	EXT2_TIND_BLOCK			(EXT2_DIND_BLOCK + 1)
-#define	EXT2_N_BLOCKS			(EXT2_TIND_BLOCK + 1)
-
-/*
- * Inode flags (GETFLAGS/SETFLAGS)
- */
-#define	EXT2_SECRM_FL			FS_SECRM_FL	/* Secure deletion */
-#define	EXT2_UNRM_FL			FS_UNRM_FL	/* Undelete */
-#define	EXT2_COMPR_FL			FS_COMPR_FL	/* Compress file */
-#define EXT2_SYNC_FL			FS_SYNC_FL	/* Synchronous updates */
-#define EXT2_IMMUTABLE_FL		FS_IMMUTABLE_FL	/* Immutable file */
-#define EXT2_APPEND_FL			FS_APPEND_FL	/* writes to file may only append */
-#define EXT2_NODUMP_FL			FS_NODUMP_FL	/* do not dump file */
-#define EXT2_NOATIME_FL			FS_NOATIME_FL	/* do not update atime */
-/* Reserved for compression usage... */
-#define EXT2_DIRTY_FL			FS_DIRTY_FL
-#define EXT2_COMPRBLK_FL		FS_COMPRBLK_FL	/* One or more compressed clusters */
-#define EXT2_NOCOMP_FL			FS_NOCOMP_FL	/* Don't compress */
-#define EXT2_ECOMPR_FL			FS_ECOMPR_FL	/* Compression error */
-/* End compression flags --- maybe not all used */	
-#define EXT2_BTREE_FL			FS_BTREE_FL	/* btree format dir */
-#define EXT2_INDEX_FL			FS_INDEX_FL	/* hash-indexed directory */
-#define EXT2_IMAGIC_FL			FS_IMAGIC_FL	/* AFS directory */
-#define EXT2_JOURNAL_DATA_FL		FS_JOURNAL_DATA_FL /* Reserved for ext3 */
-#define EXT2_NOTAIL_FL			FS_NOTAIL_FL	/* file tail should not be merged */
-#define EXT2_DIRSYNC_FL			FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
-#define EXT2_TOPDIR_FL			FS_TOPDIR_FL	/* Top of directory hierarchies*/
-#define EXT2_RESERVED_FL		FS_RESERVED_FL	/* reserved for ext2 lib */
-
-#define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
-#define EXT2_FL_USER_MODIFIABLE		FS_FL_USER_MODIFIABLE	/* User modifiable flags */
-
-/* Flags that should be inherited by new inodes from their parent. */
-#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
-			   EXT2_SYNC_FL | EXT2_NODUMP_FL |\
-			   EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
-			   EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
-			   EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
-
-/* Mask out flags that are inappropriate for the given type of inode. */
-static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
-{
-	if (S_ISDIR(mode))
-		return flags;
-	else if (S_ISREG(mode))
-		return flags & EXT2_REG_FLMASK;
-	else
-		return flags & EXT2_OTHER_FLMASK;
+	__u8 *p = ext2_sb;
+	if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC))
+		return 0;
+	return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) <<
+		le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET));
 }
 
-/*
- * ioctl commands
- */
-#define	EXT2_IOC_GETFLAGS		FS_IOC_GETFLAGS
-#define	EXT2_IOC_SETFLAGS		FS_IOC_SETFLAGS
-#define	EXT2_IOC_GETVERSION		FS_IOC_GETVERSION
-#define	EXT2_IOC_SETVERSION		FS_IOC_SETVERSION
-#define	EXT2_IOC_GETRSVSZ		_IOR('f', 5, long)
-#define	EXT2_IOC_SETRSVSZ		_IOW('f', 6, long)
-
-/*
- * ioctl commands in 32 bit emulation
- */
-#define EXT2_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
-#define EXT2_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
-#define EXT2_IOC32_GETVERSION		FS_IOC32_GETVERSION
-#define EXT2_IOC32_SETVERSION		FS_IOC32_SETVERSION
-
-/*
- * Structure of an inode on the disk
- */
-struct ext2_inode {
-	__le16	i_mode;		/* File mode */
-	__le16	i_uid;		/* Low 16 bits of Owner Uid */
-	__le32	i_size;		/* Size in bytes */
-	__le32	i_atime;	/* Access time */
-	__le32	i_ctime;	/* Creation time */
-	__le32	i_mtime;	/* Modification time */
-	__le32	i_dtime;	/* Deletion Time */
-	__le16	i_gid;		/* Low 16 bits of Group Id */
-	__le16	i_links_count;	/* Links count */
-	__le32	i_blocks;	/* Blocks count */
-	__le32	i_flags;	/* File flags */
-	union {
-		struct {
-			__le32  l_i_reserved1;
-		} linux1;
-		struct {
-			__le32  h_i_translator;
-		} hurd1;
-		struct {
-			__le32  m_i_reserved1;
-		} masix1;
-	} osd1;				/* OS dependent 1 */
-	__le32	i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
-	__le32	i_generation;	/* File version (for NFS) */
-	__le32	i_file_acl;	/* File ACL */
-	__le32	i_dir_acl;	/* Directory ACL */
-	__le32	i_faddr;	/* Fragment address */
-	union {
-		struct {
-			__u8	l_i_frag;	/* Fragment number */
-			__u8	l_i_fsize;	/* Fragment size */
-			__u16	i_pad1;
-			__le16	l_i_uid_high;	/* these 2 fields    */
-			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
-		} linux2;
-		struct {
-			__u8	h_i_frag;	/* Fragment number */
-			__u8	h_i_fsize;	/* Fragment size */
-			__le16	h_i_mode_high;
-			__le16	h_i_uid_high;
-			__le16	h_i_gid_high;
-			__le32	h_i_author;
-		} hurd2;
-		struct {
-			__u8	m_i_frag;	/* Fragment number */
-			__u8	m_i_fsize;	/* Fragment size */
-			__u16	m_pad1;
-			__u32	m_i_reserved2[2];
-		} masix2;
-	} osd2;				/* OS dependent 2 */
-};
-
-#define i_size_high	i_dir_acl
-
-#if defined(__KERNEL__) || defined(__linux__)
-#define i_reserved1	osd1.linux1.l_i_reserved1
-#define i_frag		osd2.linux2.l_i_frag
-#define i_fsize		osd2.linux2.l_i_fsize
-#define i_uid_low	i_uid
-#define i_gid_low	i_gid
-#define i_uid_high	osd2.linux2.l_i_uid_high
-#define i_gid_high	osd2.linux2.l_i_gid_high
-#define i_reserved2	osd2.linux2.l_i_reserved2
-#endif
-
-#ifdef	__hurd__
-#define i_translator	osd1.hurd1.h_i_translator
-#define i_frag		osd2.hurd2.h_i_frag
-#define i_fsize		osd2.hurd2.h_i_fsize
-#define i_uid_high	osd2.hurd2.h_i_uid_high
-#define i_gid_high	osd2.hurd2.h_i_gid_high
-#define i_author	osd2.hurd2.h_i_author
-#endif
-
-#ifdef	__masix__
-#define i_reserved1	osd1.masix1.m_i_reserved1
-#define i_frag		osd2.masix2.m_i_frag
-#define i_fsize		osd2.masix2.m_i_fsize
-#define i_reserved2	osd2.masix2.m_i_reserved2
-#endif
-
-/*
- * File system states
- */
-#define	EXT2_VALID_FS			0x0001	/* Unmounted cleanly */
-#define	EXT2_ERROR_FS			0x0002	/* Errors detected */
-
-/*
- * Mount flags
- */
-#define EXT2_MOUNT_CHECK		0x000001  /* Do mount-time checks */
-#define EXT2_MOUNT_OLDALLOC		0x000002  /* Don't use the new Orlov allocator */
-#define EXT2_MOUNT_GRPID		0x000004  /* Create files with directory's group */
-#define EXT2_MOUNT_DEBUG		0x000008  /* Some debugging messages */
-#define EXT2_MOUNT_ERRORS_CONT		0x000010  /* Continue on errors */
-#define EXT2_MOUNT_ERRORS_RO		0x000020  /* Remount fs ro on errors */
-#define EXT2_MOUNT_ERRORS_PANIC		0x000040  /* Panic on errors */
-#define EXT2_MOUNT_MINIX_DF		0x000080  /* Mimics the Minix statfs */
-#define EXT2_MOUNT_NOBH			0x000100  /* No buffer_heads */
-#define EXT2_MOUNT_NO_UID32		0x000200  /* Disable 32-bit UIDs */
-#define EXT2_MOUNT_XATTR_USER		0x004000  /* Extended user attributes */
-#define EXT2_MOUNT_POSIX_ACL		0x008000  /* POSIX Access Control Lists */
-#define EXT2_MOUNT_XIP			0x010000  /* Execute in place */
-#define EXT2_MOUNT_USRQUOTA		0x020000  /* user quota */
-#define EXT2_MOUNT_GRPQUOTA		0x040000  /* group quota */
-#define EXT2_MOUNT_RESERVATION		0x080000  /* Preallocation */
-
-
-#define clear_opt(o, opt)		o &= ~EXT2_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT2_MOUNT_##opt
-#define test_opt(sb, opt)		(EXT2_SB(sb)->s_mount_opt & \
-					 EXT2_MOUNT_##opt)
-/*
- * Maximal mount counts between two filesystem checks
- */
-#define EXT2_DFL_MAX_MNT_COUNT		20	/* Allow 20 mounts */
-#define EXT2_DFL_CHECKINTERVAL		0	/* Don't use interval check */
-
-/*
- * Behaviour when detecting errors
- */
-#define EXT2_ERRORS_CONTINUE		1	/* Continue execution */
-#define EXT2_ERRORS_RO			2	/* Remount fs read-only */
-#define EXT2_ERRORS_PANIC		3	/* Panic */
-#define EXT2_ERRORS_DEFAULT		EXT2_ERRORS_CONTINUE
-
-/*
- * Structure of the super block
- */
-struct ext2_super_block {
-	__le32	s_inodes_count;		/* Inodes count */
-	__le32	s_blocks_count;		/* Blocks count */
-	__le32	s_r_blocks_count;	/* Reserved blocks count */
-	__le32	s_free_blocks_count;	/* Free blocks count */
-	__le32	s_free_inodes_count;	/* Free inodes count */
-	__le32	s_first_data_block;	/* First Data Block */
-	__le32	s_log_block_size;	/* Block size */
-	__le32	s_log_frag_size;	/* Fragment size */
-	__le32	s_blocks_per_group;	/* # Blocks per group */
-	__le32	s_frags_per_group;	/* # Fragments per group */
-	__le32	s_inodes_per_group;	/* # Inodes per group */
-	__le32	s_mtime;		/* Mount time */
-	__le32	s_wtime;		/* Write time */
-	__le16	s_mnt_count;		/* Mount count */
-	__le16	s_max_mnt_count;	/* Maximal mount count */
-	__le16	s_magic;		/* Magic signature */
-	__le16	s_state;		/* File system state */
-	__le16	s_errors;		/* Behaviour when detecting errors */
-	__le16	s_minor_rev_level; 	/* minor revision level */
-	__le32	s_lastcheck;		/* time of last check */
-	__le32	s_checkinterval;	/* max. time between checks */
-	__le32	s_creator_os;		/* OS */
-	__le32	s_rev_level;		/* Revision level */
-	__le16	s_def_resuid;		/* Default uid for reserved blocks */
-	__le16	s_def_resgid;		/* Default gid for reserved blocks */
-	/*
-	 * These fields are for EXT2_DYNAMIC_REV superblocks only.
-	 *
-	 * Note: the difference between the compatible feature set and
-	 * the incompatible feature set is that if there is a bit set
-	 * in the incompatible feature set that the kernel doesn't
-	 * know about, it should refuse to mount the filesystem.
-	 * 
-	 * e2fsck's requirements are more strict; if it doesn't know
-	 * about a feature in either the compatible or incompatible
-	 * feature set, it must abort and not try to meddle with
-	 * things it doesn't understand...
-	 */
-	__le32	s_first_ino; 		/* First non-reserved inode */
-	__le16   s_inode_size; 		/* size of inode structure */
-	__le16	s_block_group_nr; 	/* block group # of this superblock */
-	__le32	s_feature_compat; 	/* compatible feature set */
-	__le32	s_feature_incompat; 	/* incompatible feature set */
-	__le32	s_feature_ro_compat; 	/* readonly-compatible feature set */
-	__u8	s_uuid[16];		/* 128-bit uuid for volume */
-	char	s_volume_name[16]; 	/* volume name */
-	char	s_last_mounted[64]; 	/* directory where last mounted */
-	__le32	s_algorithm_usage_bitmap; /* For compression */
-	/*
-	 * Performance hints.  Directory preallocation should only
-	 * happen if the EXT2_COMPAT_PREALLOC flag is on.
-	 */
-	__u8	s_prealloc_blocks;	/* Nr of blocks to try to preallocate*/
-	__u8	s_prealloc_dir_blocks;	/* Nr to preallocate for dirs */
-	__u16	s_padding1;
-	/*
-	 * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
-	 */
-	__u8	s_journal_uuid[16];	/* uuid of journal superblock */
-	__u32	s_journal_inum;		/* inode number of journal file */
-	__u32	s_journal_dev;		/* device number of journal file */
-	__u32	s_last_orphan;		/* start of list of inodes to delete */
-	__u32	s_hash_seed[4];		/* HTREE hash seed */
-	__u8	s_def_hash_version;	/* Default hash version to use */
-	__u8	s_reserved_char_pad;
-	__u16	s_reserved_word_pad;
-	__le32	s_default_mount_opts;
- 	__le32	s_first_meta_bg; 	/* First metablock block group */
-	__u32	s_reserved[190];	/* Padding to the end of the block */
-};
-
-/*
- * Codes for operating systems
- */
-#define EXT2_OS_LINUX		0
-#define EXT2_OS_HURD		1
-#define EXT2_OS_MASIX		2
-#define EXT2_OS_FREEBSD		3
-#define EXT2_OS_LITES		4
-
-/*
- * Revision levels
- */
-#define EXT2_GOOD_OLD_REV	0	/* The good old (original) format */
-#define EXT2_DYNAMIC_REV	1 	/* V2 format w/ dynamic inode sizes */
-
-#define EXT2_CURRENT_REV	EXT2_GOOD_OLD_REV
-#define EXT2_MAX_SUPP_REV	EXT2_DYNAMIC_REV
-
-#define EXT2_GOOD_OLD_INODE_SIZE 128
-
-/*
- * Feature set definitions
- */
-
-#define EXT2_HAS_COMPAT_FEATURE(sb,mask)			\
-	( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
-#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask)			\
-	( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
-#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask)			\
-	( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
-#define EXT2_SET_COMPAT_FEATURE(sb,mask)			\
-	EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT2_SET_INCOMPAT_FEATURE(sb,mask)			\
-	EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask)			\
-	EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask)			\
-	EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
-#define EXT2_FEATURE_COMPAT_DIR_PREALLOC	0x0001
-#define EXT2_FEATURE_COMPAT_IMAGIC_INODES	0x0002
-#define EXT3_FEATURE_COMPAT_HAS_JOURNAL		0x0004
-#define EXT2_FEATURE_COMPAT_EXT_ATTR		0x0008
-#define EXT2_FEATURE_COMPAT_RESIZE_INO		0x0010
-#define EXT2_FEATURE_COMPAT_DIR_INDEX		0x0020
-#define EXT2_FEATURE_COMPAT_ANY			0xffffffff
-
-#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
-#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
-#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
-#define EXT2_FEATURE_RO_COMPAT_ANY		0xffffffff
-
-#define EXT2_FEATURE_INCOMPAT_COMPRESSION	0x0001
-#define EXT2_FEATURE_INCOMPAT_FILETYPE		0x0002
-#define EXT3_FEATURE_INCOMPAT_RECOVER		0x0004
-#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV	0x0008
-#define EXT2_FEATURE_INCOMPAT_META_BG		0x0010
-#define EXT2_FEATURE_INCOMPAT_ANY		0xffffffff
-
-#define EXT2_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
-#define EXT2_FEATURE_INCOMPAT_SUPP	(EXT2_FEATURE_INCOMPAT_FILETYPE| \
-					 EXT2_FEATURE_INCOMPAT_META_BG)
-#define EXT2_FEATURE_RO_COMPAT_SUPP	(EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-					 EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
-					 EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
-#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED	~EXT2_FEATURE_RO_COMPAT_SUPP
-#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED	~EXT2_FEATURE_INCOMPAT_SUPP
-
-/*
- * Default values for user and/or group using reserved blocks
- */
-#define	EXT2_DEF_RESUID		0
-#define	EXT2_DEF_RESGID		0
-
-/*
- * Default mount options
- */
-#define EXT2_DEFM_DEBUG		0x0001
-#define EXT2_DEFM_BSDGROUPS	0x0002
-#define EXT2_DEFM_XATTR_USER	0x0004
-#define EXT2_DEFM_ACL		0x0008
-#define EXT2_DEFM_UID16		0x0010
-    /* Not used by ext2, but reserved for use by ext3 */
-#define EXT3_DEFM_JMODE		0x0060 
-#define EXT3_DEFM_JMODE_DATA	0x0020
-#define EXT3_DEFM_JMODE_ORDERED	0x0040
-#define EXT3_DEFM_JMODE_WBACK	0x0060
-
-/*
- * Structure of a directory entry
- */
-#define EXT2_NAME_LEN 255
-
-struct ext2_dir_entry {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__le16	name_len;		/* Name length */
-	char	name[EXT2_NAME_LEN];	/* File name */
-};
-
-/*
- * The new version of the directory entry.  Since EXT2 structures are
- * stored in intel byte order, and the name_len field could never be
- * bigger than 255 chars, it's safe to reclaim the extra byte for the
- * file_type field.
- */
-struct ext2_dir_entry_2 {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__u8	name_len;		/* Name length */
-	__u8	file_type;
-	char	name[EXT2_NAME_LEN];	/* File name */
-};
-
-/*
- * Ext2 directory file types.  Only the low 3 bits are used.  The
- * other bits are reserved for now.
- */
-enum {
-	EXT2_FT_UNKNOWN		= 0,
-	EXT2_FT_REG_FILE	= 1,
-	EXT2_FT_DIR		= 2,
-	EXT2_FT_CHRDEV		= 3,
-	EXT2_FT_BLKDEV		= 4,
-	EXT2_FT_FIFO		= 5,
-	EXT2_FT_SOCK		= 6,
-	EXT2_FT_SYMLINK		= 7,
-	EXT2_FT_MAX
-};
-
-/*
- * EXT2_DIR_PAD defines the directory entries boundaries
- *
- * NOTE: It must be a multiple of 4
- */
-#define EXT2_DIR_PAD		 	4
-#define EXT2_DIR_ROUND 			(EXT2_DIR_PAD - 1)
-#define EXT2_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT2_DIR_ROUND) & \
-					 ~EXT2_DIR_ROUND)
-#define EXT2_MAX_REC_LEN		((1<<16)-1)
-
 #endif	/* _LINUX_EXT2_FS_H */
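
The slimmed-down header now carries only fixed superblock offsets and a size probe. A hedged usage sketch (the caller name and buffer handling are assumptions; ext2_image_size() itself is from the hunk above):

/*
 * Hypothetical probe: 'sb_buf' points at an in-memory copy of the
 * on-disk ext2 superblock (the 1024 bytes at offset 1024 of the image).
 */
static bool demo_probe_ext2(void *sb_buf, u64 *bytes)
{
	u64 size = ext2_image_size(sb_buf);

	if (!size)
		return false;	/* magic at offset 0x38 did not match */

	*bytes = size;		/* blocks count scaled by the block-size field */
	return true;
}
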
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
deleted file mode 100644
index db4d9f5..0000000
--- a/include/linux/ext2_fs_sb.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *  linux/include/linux/ext2_fs_sb.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs_sb.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#ifndef _LINUX_EXT2_FS_SB
-#define _LINUX_EXT2_FS_SB
-
-#include <linux/blockgroup_lock.h>
-#include <linux/percpu_counter.h>
-#include <linux/rbtree.h>
-
-/* XXX Here for now... not interested in restructing headers JUST now */
-
-/* data type for block offset of block group */
-typedef int ext2_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext2_fsblk_t;
-
-#define E2FSBLK "%lu"
-
-struct ext2_reserve_window {
-	ext2_fsblk_t		_rsv_start;	/* First byte reserved */
-	ext2_fsblk_t		_rsv_end;	/* Last byte reserved or 0 */
-};
-
-struct ext2_reserve_window_node {
-	struct rb_node	 	rsv_node;
-	__u32			rsv_goal_size;
-	__u32			rsv_alloc_hit;
-	struct ext2_reserve_window	rsv_window;
-};
-
-struct ext2_block_alloc_info {
-	/* information about reservation window */
-	struct ext2_reserve_window_node	rsv_window_node;
-	/*
-	 * was i_next_alloc_block in ext2_inode_info
-	 * is the logical (file-relative) number of the
-	 * most-recently-allocated block in this file.
-	 * We use this for detecting linearly ascending allocation requests.
-	 */
-	__u32			last_alloc_logical_block;
-	/*
-	 * Was i_next_alloc_goal in ext2_inode_info
-	 * is the *physical* companion to i_next_alloc_block.
-	 * it is the physical block number of the block which was most recently
-	 * allocated to this file.  This gives us the goal (target) for the next
-	 * allocation when we detect linearly ascending requests.
-	 */
-	ext2_fsblk_t		last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * second extended-fs super-block data in memory
- */
-struct ext2_sb_info {
-	unsigned long s_frag_size;	/* Size of a fragment in bytes */
-	unsigned long s_frags_per_block;/* Number of fragments per block */
-	unsigned long s_inodes_per_block;/* Number of inodes per block */
-	unsigned long s_frags_per_group;/* Number of fragments in a group */
-	unsigned long s_blocks_per_group;/* Number of blocks in a group */
-	unsigned long s_inodes_per_group;/* Number of inodes in a group */
-	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
-	unsigned long s_gdb_count;	/* Number of group descriptor blocks */
-	unsigned long s_desc_per_block;	/* Number of group descriptors per block */
-	unsigned long s_groups_count;	/* Number of groups in the fs */
-	unsigned long s_overhead_last;  /* Last calculated overhead */
-	unsigned long s_blocks_last;    /* Last seen block count */
-	struct buffer_head * s_sbh;	/* Buffer containing the super block */
-	struct ext2_super_block * s_es;	/* Pointer to the super block in the buffer */
-	struct buffer_head ** s_group_desc;
-	unsigned long  s_mount_opt;
-	unsigned long s_sb_block;
-	uid_t s_resuid;
-	gid_t s_resgid;
-	unsigned short s_mount_state;
-	unsigned short s_pad;
-	int s_addr_per_block_bits;
-	int s_desc_per_block_bits;
-	int s_inode_size;
-	int s_first_ino;
-	spinlock_t s_next_gen_lock;
-	u32 s_next_generation;
-	unsigned long s_dir_count;
-	u8 *s_debts;
-	struct percpu_counter s_freeblocks_counter;
-	struct percpu_counter s_freeinodes_counter;
-	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock *s_blockgroup_lock;
-	/* root of the per fs reservation window tree */
-	spinlock_t s_rsv_window_lock;
-	struct rb_root s_rsv_window_root;
-	struct ext2_reserve_window_node s_rsv_window_head;
-	/*
-	 * s_lock protects against concurrent modifications of s_mount_state,
-	 * s_blocks_last, s_overhead_last and the content of superblock's
-	 * buffer pointed to by sbi->s_es.
-	 *
-	 * Note: It is used in ext2_show_options() to provide a consistent view
-	 * of the mount options.
-	 */
-	spinlock_t s_lock;
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
-{
-	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-#endif	/* _LINUX_EXT2_FS_SB */
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
deleted file mode 100644
index f42c098..0000000
--- a/include/linux/ext3_fs_i.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- *  linux/include/linux/ext3_fs_i.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs_i.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#ifndef _LINUX_EXT3_FS_I
-#define _LINUX_EXT3_FS_I
-
-#include <linux/rwsem.h>
-#include <linux/rbtree.h>
-#include <linux/seqlock.h>
-#include <linux/mutex.h>
-
-/* data type for block offset of block group */
-typedef int ext3_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext3_fsblk_t;
-
-#define E3FSBLK "%lu"
-
-struct ext3_reserve_window {
-	ext3_fsblk_t	_rsv_start;	/* First byte reserved */
-	ext3_fsblk_t	_rsv_end;	/* Last byte reserved or 0 */
-};
-
-struct ext3_reserve_window_node {
-	struct rb_node		rsv_node;
-	__u32			rsv_goal_size;
-	__u32			rsv_alloc_hit;
-	struct ext3_reserve_window	rsv_window;
-};
-
-struct ext3_block_alloc_info {
-	/* information about reservation window */
-	struct ext3_reserve_window_node	rsv_window_node;
-	/*
-	 * was i_next_alloc_block in ext3_inode_info
-	 * is the logical (file-relative) number of the
-	 * most-recently-allocated block in this file.
-	 * We use this for detecting linearly ascending allocation requests.
-	 */
-	__u32                   last_alloc_logical_block;
-	/*
-	 * Was i_next_alloc_goal in ext3_inode_info
-	 * is the *physical* companion to i_next_alloc_block.
-	 * it is the physical block number of the block which was most recently
-	 * allocated to this file.  This gives us the goal (target) for the next
-	 * allocation when we detect linearly ascending requests.
-	 */
-	ext3_fsblk_t		last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * third extended file system inode data in memory
- */
-struct ext3_inode_info {
-	__le32	i_data[15];	/* unconverted */
-	__u32	i_flags;
-#ifdef EXT3_FRAGMENTS
-	__u32	i_faddr;
-	__u8	i_frag_no;
-	__u8	i_frag_size;
-#endif
-	ext3_fsblk_t	i_file_acl;
-	__u32	i_dir_acl;
-	__u32	i_dtime;
-
-	/*
-	 * i_block_group is the number of the block group which contains
-	 * this file's inode.  Constant across the lifetime of the inode,
-	 * it is used for making block allocation decisions - we try to
-	 * place a file's data blocks near its inode block, and new inodes
-	 * near to their parent directory's inode.
-	 */
-	__u32	i_block_group;
-	unsigned long	i_state_flags;	/* Dynamic state flags for ext3 */
-
-	/* block reservation info */
-	struct ext3_block_alloc_info *i_block_alloc_info;
-
-	__u32	i_dir_start_lookup;
-#ifdef CONFIG_EXT3_FS_XATTR
-	/*
-	 * Extended attributes can be read independently of the main file
-	 * data. Taking i_mutex even when reading would cause contention
-	 * between readers of EAs and writers of regular file data, so
-	 * instead we synchronize on xattr_sem when reading or changing
-	 * EAs.
-	 */
-	struct rw_semaphore xattr_sem;
-#endif
-
-	struct list_head i_orphan;	/* unlinked but open inodes */
-
-	/*
-	 * i_disksize keeps track of what the inode size is ON DISK, not
-	 * in memory.  During truncate, i_size is set to the new size by
-	 * the VFS prior to calling ext3_truncate(), but the filesystem won't
-	 * set i_disksize to 0 until the truncate is actually under way.
-	 *
-	 * The intent is that i_disksize always represents the blocks which
-	 * are used by this file.  This allows recovery to restart truncate
-	 * on orphans if we crash during truncate.  We actually write i_disksize
-	 * into the on-disk inode when writing inodes out, instead of i_size.
-	 *
-	 * The only time when i_disksize and i_size may be different is when
-	 * a truncate is in progress.  The only things which change i_disksize
-	 * are ext3_get_block (growth) and ext3_truncate (shrinkth).
-	 */
-	loff_t	i_disksize;
-
-	/* on-disk additional length */
-	__u16 i_extra_isize;
-
-	/*
-	 * truncate_mutex is for serialising ext3_truncate() against
-	 * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
-	 * data tree are chopped off during truncate. We can't do that in
-	 * ext3 because whenever we perform intermediate commits during
-	 * truncate, the inode and all the metadata blocks *must* be in a
-	 * consistent state which allows truncation of the orphans to restart
-	 * during recovery.  Hence we must fix the get_block-vs-truncate race
-	 * by other means, so we have truncate_mutex.
-	 */
-	struct mutex truncate_mutex;
-
-	/*
-	 * Transactions that contain inode's metadata needed to complete
-	 * fsync and fdatasync, respectively.
-	 */
-	atomic_t i_sync_tid;
-	atomic_t i_datasync_tid;
-
-	struct inode vfs_inode;
-};
-
-#endif	/* _LINUX_EXT3_FS_I */
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
deleted file mode 100644
index 6436525..0000000
--- a/include/linux/ext3_fs_sb.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- *  linux/include/linux/ext3_fs_sb.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs_sb.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#ifndef _LINUX_EXT3_FS_SB
-#define _LINUX_EXT3_FS_SB
-
-#ifdef __KERNEL__
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/blockgroup_lock.h>
-#include <linux/percpu_counter.h>
-#endif
-#include <linux/rbtree.h>
-
-/*
- * third extended-fs super-block data in memory
- */
-struct ext3_sb_info {
-	unsigned long s_frag_size;	/* Size of a fragment in bytes */
-	unsigned long s_frags_per_block;/* Number of fragments per block */
-	unsigned long s_inodes_per_block;/* Number of inodes per block */
-	unsigned long s_frags_per_group;/* Number of fragments in a group */
-	unsigned long s_blocks_per_group;/* Number of blocks in a group */
-	unsigned long s_inodes_per_group;/* Number of inodes in a group */
-	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
-	unsigned long s_gdb_count;	/* Number of group descriptor blocks */
-	unsigned long s_desc_per_block;	/* Number of group descriptors per block */
-	unsigned long s_groups_count;	/* Number of groups in the fs */
-	unsigned long s_overhead_last;  /* Last calculated overhead */
-	unsigned long s_blocks_last;    /* Last seen block count */
-	struct buffer_head * s_sbh;	/* Buffer containing the super block */
-	struct ext3_super_block * s_es;	/* Pointer to the super block in the buffer */
-	struct buffer_head ** s_group_desc;
-	unsigned long  s_mount_opt;
-	ext3_fsblk_t s_sb_block;
-	uid_t s_resuid;
-	gid_t s_resgid;
-	unsigned short s_mount_state;
-	unsigned short s_pad;
-	int s_addr_per_block_bits;
-	int s_desc_per_block_bits;
-	int s_inode_size;
-	int s_first_ino;
-	spinlock_t s_next_gen_lock;
-	u32 s_next_generation;
-	u32 s_hash_seed[4];
-	int s_def_hash_version;
-	int s_hash_unsigned;	/* 3 if hash should be signed, 0 if not */
-	struct percpu_counter s_freeblocks_counter;
-	struct percpu_counter s_freeinodes_counter;
-	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock *s_blockgroup_lock;
-
-	/* root of the per fs reservation window tree */
-	spinlock_t s_rsv_window_lock;
-	struct rb_root s_rsv_window_root;
-	struct ext3_reserve_window_node s_rsv_window_head;
-
-	/* Journaling */
-	struct inode * s_journal_inode;
-	struct journal_s * s_journal;
-	struct list_head s_orphan;
-	struct mutex s_orphan_lock;
-	struct mutex s_resize_lock;
-	unsigned long s_commit_interval;
-	struct block_device *journal_bdev;
-#ifdef CONFIG_QUOTA
-	char *s_qf_names[MAXQUOTAS];		/* Names of quota files with journalled quota */
-	int s_jquota_fmt;			/* Format of quota to use */
-#endif
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
-{
-	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-#endif	/* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
deleted file mode 100644
index d7b5ddc..0000000
--- a/include/linux/ext3_jbd.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * linux/include/linux/ext3_jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Ext3-specific journaling extensions.
- */
-
-#ifndef _LINUX_EXT3_JBD_H
-#define _LINUX_EXT3_JBD_H
-
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-
-#define EXT3_JOURNAL(inode)	(EXT3_SB((inode)->i_sb)->s_journal)
-
-/* Define the number of blocks we need to account to a transaction to
- * modify one block of data.
- *
- * We may have to touch one inode, one bitmap buffer, up to three
- * indirection blocks, the group and superblock summaries, and the data
- * block to complete the transaction.  */
-
-#define EXT3_SINGLEDATA_TRANS_BLOCKS	8U
-
-/* Extended attribute operations touch at most two data buffers,
- * two bitmap buffers, and two group summaries, in addition to the inode
- * and the superblock, which are already accounted for. */
-
-#define EXT3_XATTR_TRANS_BLOCKS		6U
-
-/* Define the minimum size for a transaction which modifies data.  This
- * needs to take into account the fact that we may end up modifying two
- * quota files too (one for the group, one for the user quota).  The
- * superblock only gets updated once, of course, so don't bother
- * counting that again for the quota updates. */
-
-#define EXT3_DATA_TRANS_BLOCKS(sb)	(EXT3_SINGLEDATA_TRANS_BLOCKS + \
-					 EXT3_XATTR_TRANS_BLOCKS - 2 + \
-					 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
-
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
- * generous.  We can grow the delete transaction later if necessary. */
-
-#define EXT3_DELETE_TRANS_BLOCKS(sb)   (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
-
-/* Define an arbitrary limit for the amount of data we will anticipate
- * writing to any given transaction.  For unbounded transactions such as
- * write(2) and truncate(2) we can write more than this, but we always
- * start off at the maximum transaction size and grow the transaction
- * optimistically as we go. */
-
-#define EXT3_MAX_TRANS_DATA		64U
-
-/* We break up a large truncate or write transaction once the handle's
- * buffer credits gets this low, we need either to extend the
- * transaction or to start a new one.  Reserve enough space here for
- * inode, bitmap, superblock, group and indirection updates for at least
- * one block, plus two quota updates.  Quota allocations are not
- * needed. */
-
-#define EXT3_RESERVE_TRANS_BLOCKS	12U
-
-#define EXT3_INDEX_EXTRA_TRANS_BLOCKS	8
-
-#ifdef CONFIG_QUOTA
-/* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
-/* Amount of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
-#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
-#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
-#else
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
-#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
-#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
-#endif
-#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
-
-int
-ext3_mark_iloc_dirty(handle_t *handle,
-		     struct inode *inode,
-		     struct ext3_iloc *iloc);
-
-/*
- * On success, We end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-			struct ext3_iloc *iloc);
-
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
-
-/*
- * Wrapper functions with which ext3 calls into JBD.  The intent here is
- * to allow these to be turned into appropriate stubs so ext3 can control
- * ext2 filesystems, so ext2+ext3 systems only need one fs.  This work hasn't
- * been done yet.
- */
-
-static inline void ext3_journal_release_buffer(handle_t *handle,
-						struct buffer_head *bh)
-{
-	journal_release_buffer(handle, bh);
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-				unsigned long blocknr, struct buffer_head *bh);
-
-int __ext3_journal_get_create_access(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-int __ext3_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-#define ext3_journal_get_undo_access(handle, bh) \
-	__ext3_journal_get_undo_access(__func__, (handle), (bh))
-#define ext3_journal_get_write_access(handle, bh) \
-	__ext3_journal_get_write_access(__func__, (handle), (bh))
-#define ext3_journal_revoke(handle, blocknr, bh) \
-	__ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
-#define ext3_journal_get_create_access(handle, bh) \
-	__ext3_journal_get_create_access(__func__, (handle), (bh))
-#define ext3_journal_dirty_metadata(handle, bh) \
-	__ext3_journal_dirty_metadata(__func__, (handle), (bh))
-#define ext3_journal_forget(handle, bh) \
-	__ext3_journal_forget(__func__, (handle), (bh))
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
-
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
-int __ext3_journal_stop(const char *where, handle_t *handle);
-
-static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
-{
-	return ext3_journal_start_sb(inode->i_sb, nblocks);
-}
-
-#define ext3_journal_stop(handle) \
-	__ext3_journal_stop(__func__, (handle))
-
-static inline handle_t *ext3_journal_current_handle(void)
-{
-	return journal_current_handle();
-}
-
-static inline int ext3_journal_extend(handle_t *handle, int nblocks)
-{
-	return journal_extend(handle, nblocks);
-}
-
-static inline int ext3_journal_restart(handle_t *handle, int nblocks)
-{
-	return journal_restart(handle, nblocks);
-}
-
-static inline int ext3_journal_blocks_per_page(struct inode *inode)
-{
-	return journal_blocks_per_page(inode);
-}
-
-static inline int ext3_journal_force_commit(journal_t *journal)
-{
-	return journal_force_commit(journal);
-}
-
-/* super.c */
-int ext3_force_commit(struct super_block *sb);
-
-static inline int ext3_should_journal_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 1;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
-		return 1;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_order_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_writeback_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
-		return 1;
-	return 0;
-}
-
-#endif	/* _LINUX_EXT3_JBD_H */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 4db7b68..cdc9b71 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -2,6 +2,7 @@
 #define _LINUX_FIREWIRE_H
 
 #include <linux/completion.h>
+#include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
@@ -64,8 +65,6 @@
 #define CSR_MODEL		0x17
 #define CSR_DIRECTORY_ID	0x20
 
-struct device;
-
 struct fw_csr_iterator {
 	const u32 *p;
 	const u32 *end;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 135693e..8de6755 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1215,6 +1215,7 @@
 extern int lease_modify(struct file_lock **, int);
 extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
 extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
+extern void locks_delete_block(struct file_lock *waiter);
 extern void lock_flocks(void);
 extern void unlock_flocks(void);
 #else /* !CONFIG_FILE_LOCKING */
@@ -1359,6 +1360,10 @@
 	return 1;
 }
 
+static inline void locks_delete_block(struct file_lock *waiter)
+{
+}
+
 static inline void lock_flocks(void)
 {
 }
@@ -2506,6 +2511,7 @@
 extern int simple_setattr(struct dentry *, struct iattr *);
 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int simple_statfs(struct dentry *, struct kstatfs *);
+extern int simple_open(struct inode *inode, struct file *file);
 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
 extern int simple_unlink(struct inode *, struct dentry *);
 extern int simple_rmdir(struct inode *, struct dentry *);
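
simple_open() is a small libfs helper that copies inode->i_private into file->private_data. A hedged sketch of the kind of file_operations that would use it (demo_read is assumed to exist elsewhere):

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,		/* file->private_data = inode->i_private */
	.read	= demo_read,
	.llseek	= default_llseek,
};
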
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/include/linux/fsl/mxs-dma.h
similarity index 100%
rename from arch/arm/mach-mxs/include/mach/dma.h
rename to include/linux/fsl/mxs-dma.h
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index dd478fc..5f3f3be 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -144,12 +144,14 @@
 enum trace_reg {
 	TRACE_REG_REGISTER,
 	TRACE_REG_UNREGISTER,
+#ifdef CONFIG_PERF_EVENTS
 	TRACE_REG_PERF_REGISTER,
 	TRACE_REG_PERF_UNREGISTER,
 	TRACE_REG_PERF_OPEN,
 	TRACE_REG_PERF_CLOSE,
 	TRACE_REG_PERF_ADD,
 	TRACE_REG_PERF_DEL,
+#endif
 };
 
 struct ftrace_event_call;
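
With the perf values now hidden behind CONFIG_PERF_EVENTS, any ->reg() implementation that switches on them has to guard those cases the same way. A minimal sketch (the demo_ callback is an assumption, not taken from this patch):

static int demo_event_reg(struct ftrace_event_call *call,
			  enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		return 0;
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
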
diff --git a/include/linux/hsi/Kbuild b/include/linux/hsi/Kbuild
new file mode 100644
index 0000000..271a770
--- /dev/null
+++ b/include/linux/hsi/Kbuild
@@ -0,0 +1 @@
+header-y += hsi_char.h
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
new file mode 100644
index 0000000..4b17806
--- /dev/null
+++ b/include/linux/hsi/hsi.h
@@ -0,0 +1,410 @@
+/*
+ * HSI core header file.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_H__
+#define __LINUX_HSI_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+/* HSI message ttype */
+#define HSI_MSG_READ	0
+#define HSI_MSG_WRITE	1
+
+/* HSI configuration values */
+enum {
+	HSI_MODE_STREAM	= 1,
+	HSI_MODE_FRAME,
+};
+
+enum {
+	HSI_FLOW_SYNC,	/* Synchronized flow */
+	HSI_FLOW_PIPE,	/* Pipelined flow */
+};
+
+enum {
+	HSI_ARB_RR,	/* Round-robin arbitration */
+	HSI_ARB_PRIO,	/* Channel priority arbitration */
+};
+
+#define HSI_MAX_CHANNELS	16
+
+/* HSI message status codes */
+enum {
+	HSI_STATUS_COMPLETED,	/* Message transfer is completed */
+	HSI_STATUS_PENDING,	/* Message pending to be read/write (POLL) */
+	HSI_STATUS_PROCEEDING,	/* Message transfer is ongoing */
+	HSI_STATUS_QUEUED,	/* Message waiting to be served */
+	HSI_STATUS_ERROR,	/* Error when message transfer was ongoing */
+};
+
+/* HSI port event codes */
+enum {
+	HSI_EVENT_START_RX,
+	HSI_EVENT_STOP_RX,
+};
+
+/**
+ * struct hsi_config - Configuration for RX/TX HSI modules
+ * @mode: Bit transmission mode (STREAM or FRAME)
+ * @channels: Number of channels to use [1..16]
+ * @speed: Max bit transmission speed (Kbit/s)
+ * @flow: RX flow type (SYNCHRONIZED or PIPELINE)
+ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
+ */
+struct hsi_config {
+	unsigned int	mode;
+	unsigned int	channels;
+	unsigned int	speed;
+	union {
+		unsigned int	flow;		/* RX only */
+		unsigned int	arb_mode;	/* TX only */
+	};
+};
+
+/**
+ * struct hsi_board_info - HSI client board info
+ * @name: Name for the HSI device
+ * @hsi_id: HSI controller id where the client sits
+ * @port: Port number in the controller where the client sits
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @platform_data: Platform related data
+ * @archdata: Architecture-dependent device data
+ */
+struct hsi_board_info {
+	const char		*name;
+	unsigned int		hsi_id;
+	unsigned int		port;
+	struct hsi_config	tx_cfg;
+	struct hsi_config	rx_cfg;
+	void			*platform_data;
+	struct dev_archdata	*archdata;
+};
+
+#ifdef CONFIG_HSI_BOARDINFO
+extern int hsi_register_board_info(struct hsi_board_info const *info,
+							unsigned int len);
+#else
+static inline int hsi_register_board_info(struct hsi_board_info const *info,
+							unsigned int len)
+{
+	return 0;
+}
+#endif /* CONFIG_HSI_BOARDINFO */
+
+/**
+ * struct hsi_client - HSI client attached to an HSI port
+ * @device: Driver model representation of the device
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @hsi_start_rx: Called after incoming wake line goes high
+ * @hsi_stop_rx: Called after incoming wake line goes low
+ */
+struct hsi_client {
+	struct device		device;
+	struct hsi_config	tx_cfg;
+	struct hsi_config	rx_cfg;
+	void			(*hsi_start_rx)(struct hsi_client *cl);
+	void			(*hsi_stop_rx)(struct hsi_client *cl);
+	/* private: */
+	unsigned int		pclaimed:1;
+	struct list_head	link;
+};
+
+#define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
+
+static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)
+{
+	dev_set_drvdata(&cl->device, data);
+}
+
+static inline void *hsi_client_drvdata(struct hsi_client *cl)
+{
+	return dev_get_drvdata(&cl->device);
+}
+
+/**
+ * struct hsi_client_driver - Driver associated to an HSI client
+ * @driver: Driver model representation of the driver
+ */
+struct hsi_client_driver {
+	struct device_driver	driver;
+};
+
+#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\
+									driver)
+
+int hsi_register_client_driver(struct hsi_client_driver *drv);
+
+static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+
+/**
+ * struct hsi_msg - HSI message descriptor
+ * @link: Free to use by the current descriptor owner
+ * @cl: HSI device client that issues the transfer
+ * @sgt: Head of the scatterlist array
+ * @context: Client context data associated to the transfer
+ * @complete: Transfer completion callback
+ * @destructor: Destructor to free resources when flushing
+ * @status: Status of the transfer when completed
+ * @actual_len: Actual length of data transferred on completion
+ * @channel: Channel where to TX/RX the message
+ * @ttype: Transfer type (TX if set, RX otherwise)
+ * @break_frame: if true HSI will send/receive a break frame. Data buffers are
+ *		ignored in the request.
+ */
+struct hsi_msg {
+	struct list_head	link;
+	struct hsi_client	*cl;
+	struct sg_table		sgt;
+	void			*context;
+
+	void			(*complete)(struct hsi_msg *msg);
+	void			(*destructor)(struct hsi_msg *msg);
+
+	int			status;
+	unsigned int		actual_len;
+	unsigned int		channel;
+	unsigned int		ttype:1;
+	unsigned int		break_frame:1;
+};
+
+struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags);
+void hsi_free_msg(struct hsi_msg *msg);
+
+/**
+ * struct hsi_port - HSI port device
+ * @device: Driver model representation of the device
+ * @tx_cfg: Current TX path configuration
+ * @rx_cfg: Current RX path configuration
+ * @num: Port number
+ * @shared: Set when port can be shared by different clients
+ * @claimed: Reference count of clients which claimed the port
+ * @lock: Serialize port claim
+ * @async: Asynchronous transfer callback
+ * @setup: Callback to set the HSI client configuration
+ * @flush: Callback to clean the HW state and destroy all pending transfers
+ * @start_tx: Callback to inform that a client wants to TX data
+ * @stop_tx: Callback to inform that a client no longer wishes to TX data
+ * @release: Callback to inform that a client no longer uses the port
+ * @clients: List of hsi_clients using the port.
+ * @clock: Lock to serialize access to the clients list.
+ */
+struct hsi_port {
+	struct device			device;
+	struct hsi_config		tx_cfg;
+	struct hsi_config		rx_cfg;
+	unsigned int			num;
+	unsigned int			shared:1;
+	int				claimed;
+	struct mutex			lock;
+	int				(*async)(struct hsi_msg *msg);
+	int				(*setup)(struct hsi_client *cl);
+	int				(*flush)(struct hsi_client *cl);
+	int				(*start_tx)(struct hsi_client *cl);
+	int				(*stop_tx)(struct hsi_client *cl);
+	int				(*release)(struct hsi_client *cl);
+	struct list_head		clients;
+	spinlock_t			clock;
+};
+
+#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
+#define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
+
+void hsi_event(struct hsi_port *port, unsigned int event);
+int hsi_claim_port(struct hsi_client *cl, unsigned int share);
+void hsi_release_port(struct hsi_client *cl);
+
+static inline int hsi_port_claimed(struct hsi_client *cl)
+{
+	return cl->pclaimed;
+}
+
+static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data)
+{
+	dev_set_drvdata(&port->device, data);
+}
+
+static inline void *hsi_port_drvdata(struct hsi_port *port)
+{
+	return dev_get_drvdata(&port->device);
+}
+
+/**
+ * struct hsi_controller - HSI controller device
+ * @device: Driver model representation of the device
+ * @owner: Pointer to the module owning the controller
+ * @id: HSI controller ID
+ * @num_ports: Number of ports in the HSI controller
+ * @port: Array of HSI ports
+ */
+struct hsi_controller {
+	struct device		device;
+	struct module		*owner;
+	unsigned int		id;
+	unsigned int		num_ports;
+	struct hsi_port		*port;
+};
+
+#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
+
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags);
+void hsi_free_controller(struct hsi_controller *hsi);
+int hsi_register_controller(struct hsi_controller *hsi);
+void hsi_unregister_controller(struct hsi_controller *hsi);
+
+static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi,
+								void *data)
+{
+	dev_set_drvdata(&hsi->device, data);
+}
+
+static inline void *hsi_controller_drvdata(struct hsi_controller *hsi)
+{
+	return dev_get_drvdata(&hsi->device);
+}
+
+static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi,
+							unsigned int num)
+{
+	return (num < hsi->num_ports) ? &hsi->port[num] : NULL;
+}
+
+/*
+ * API for HSI clients
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg);
+
+/**
+ * hsi_id - Get HSI controller ID associated to a client
+ * @cl: Pointer to a HSI client
+ *
+ * Return the controller id where the client is attached to
+ */
+static inline unsigned int hsi_id(struct hsi_client *cl)
+{
+	return	to_hsi_controller(cl->device.parent->parent)->id;
+}
+
+/**
+ * hsi_port_id - Gets the port number a client is attached to
+ * @cl: Pointer to HSI client
+ *
+ * Return the port number associated to the client
+ */
+static inline unsigned int hsi_port_id(struct hsi_client *cl)
+{
+	return	to_hsi_port(cl->device.parent)->num;
+}
+
+/**
+ * hsi_setup - Configure the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * When sharing ports, clients should either rely on a single
+ * client setup or have the same setup for all of them.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_setup(struct hsi_client *cl)
+{
+	if (!hsi_port_claimed(cl))
+		return -EACCES;
+	return	hsi_get_port(cl)->setup(cl);
+}
+
+/**
+ * hsi_flush - Flush all pending transactions on the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * This function will destroy all pending hsi_msg in the port and reset
+ * the HW port so it is ready to receive and transmit from a clean state.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_flush(struct hsi_client *cl)
+{
+	if (!hsi_port_claimed(cl))
+		return -EACCES;
+	return hsi_get_port(cl)->flush(cl);
+}
+
+/**
+ * hsi_async_read - Submit a read transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg)
+{
+	msg->ttype = HSI_MSG_READ;
+	return hsi_async(cl, msg);
+}
+
+/**
+ * hsi_async_write - Submit a write transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg)
+{
+	msg->ttype = HSI_MSG_WRITE;
+	return hsi_async(cl, msg);
+}
+
+/**
+ * hsi_start_tx - Signal the port that the client wants to start a TX
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_start_tx(struct hsi_client *cl)
+{
+	if (!hsi_port_claimed(cl))
+		return -EACCES;
+	return hsi_get_port(cl)->start_tx(cl);
+}
+
+/**
+ * hsi_stop_tx - Signal the port that the client no longer wants to transmit
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_stop_tx(struct hsi_client *cl)
+{
+	if (!hsi_port_claimed(cl))
+		return -EACCES;
+	return hsi_get_port(cl)->stop_tx(cl);
+}
+#endif /* __LINUX_HSI_H__ */
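
Putting the client-side pieces of the new API together, a hedged sketch of a client claiming its port, applying its configuration and queueing one asynchronous read (function names, buffer handling and error paths are illustrative assumptions; only the hsi_* calls come from the header above):

/* Hypothetical completion callback: msg->status/actual_len are valid here. */
static void demo_read_complete(struct hsi_msg *msg)
{
	hsi_free_msg(msg);
}

static int demo_client_start(struct hsi_client *cl, void *buf, size_t len)
{
	struct hsi_msg *msg;
	int err;

	err = hsi_claim_port(cl, 0);		/* exclusive claim */
	if (err)
		return err;

	err = hsi_setup(cl);			/* push tx_cfg/rx_cfg to the port */
	if (err)
		goto release;

	msg = hsi_alloc_msg(1, GFP_KERNEL);	/* one scatterlist fragment */
	if (!msg) {
		err = -ENOMEM;
		goto release;
	}
	sg_init_one(msg->sgt.sgl, buf, len);
	msg->cl = cl;
	msg->channel = 0;
	msg->complete = demo_read_complete;

	return hsi_async_read(cl, msg);

release:
	hsi_release_port(cl);
	return err;
}
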
diff --git a/include/linux/hsi/hsi_char.h b/include/linux/hsi/hsi_char.h
new file mode 100644
index 0000000..76160b4
--- /dev/null
+++ b/include/linux/hsi/hsi_char.h
@@ -0,0 +1,63 @@
+/*
+ * Part of the HSI character device driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Andras Domokos <andras.domokos at nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+
+#ifndef __HSI_CHAR_H
+#define __HSI_CHAR_H
+
+#define HSI_CHAR_MAGIC		'k'
+#define HSC_IOW(num, dtype)	_IOW(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IOR(num, dtype)	_IOR(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IOWR(num, dtype)	_IOWR(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IO(num)		_IO(HSI_CHAR_MAGIC, num)
+
+#define HSC_RESET		HSC_IO(16)
+#define HSC_SET_PM		HSC_IO(17)
+#define HSC_SEND_BREAK		HSC_IO(18)
+#define HSC_SET_RX		HSC_IOW(19, struct hsc_rx_config)
+#define HSC_GET_RX		HSC_IOW(20, struct hsc_rx_config)
+#define HSC_SET_TX		HSC_IOW(21, struct hsc_tx_config)
+#define HSC_GET_TX		HSC_IOW(22, struct hsc_tx_config)
+
+#define HSC_PM_DISABLE		0
+#define HSC_PM_ENABLE		1
+
+#define HSC_MODE_STREAM		1
+#define HSC_MODE_FRAME		2
+#define HSC_FLOW_SYNC		0
+#define HSC_ARB_RR		0
+#define HSC_ARB_PRIO		1
+
+struct hsc_rx_config {
+	uint32_t mode;
+	uint32_t flow;
+	uint32_t channels;
+};
+
+struct hsc_tx_config {
+	uint32_t mode;
+	uint32_t channels;
+	uint32_t speed;
+	uint32_t arb_mode;
+};
+
+#endif /* __HSI_CHAR_H */
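
Since this header is exported to userspace, the ioctls above form the character-device ABI. A hedged user-space sketch (the device node name is an assumption):

#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/hsi/hsi_char.h>

/* Hypothetical: configure the RX side of an HSI character device. */
static int demo_configure_rx(void)
{
	struct hsc_rx_config rx = {
		.mode		= HSC_MODE_FRAME,
		.flow		= HSC_FLOW_SYNC,
		.channels	= 4,
	};
	int fd = open("/dev/hsi_char0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	return ioctl(fd, HSC_SET_RX, &rx);
}
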
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
index 79c4f26..18a5d02 100644
--- a/include/linux/if_eql.h
+++ b/include/linux/if_eql.h
@@ -22,7 +22,7 @@
 #define EQL_DEFAULT_SLAVE_PRIORITY 28800
 #define EQL_DEFAULT_MAX_SLAVES     4
 #define EQL_DEFAULT_MTU            576
-#define EQL_DEFAULT_RESCHED_IVAL   100
+#define EQL_DEFAULT_RESCHED_IVAL   HZ
 
 #define EQL_ENSLAVE     (SIOCDEVPRIVATE)
 #define EQL_EMANCIPATE  (SIOCDEVPRIVATE + 1)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index bff29c5..7810406 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -263,6 +263,11 @@
 	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
 }
 
+static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ead4a42..c65740d 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -42,12 +42,6 @@
 /* Number of irqs reserved for a legacy isa controller */
 #define NUM_ISA_INTERRUPTS	16
 
-/* This type is the placeholder for a hardware interrupt number. It has to
- * be big enough to enclose whatever representation is used by a given
- * platform.
- */
-typedef unsigned long irq_hw_number_t;
-
 /**
  * struct irq_domain_ops - Methods for irq_domain objects
  * @match: Match an interrupt controller device node to a host, returns
@@ -104,6 +98,9 @@
 			unsigned int size;
 			unsigned int *revmap;
 		} linear;
+		struct {
+			unsigned int max_irq;
+		} nomap;
 		struct radix_tree_root tree;
 	} revmap_data;
 	const struct irq_domain_ops *ops;
@@ -126,6 +123,7 @@
 					 const struct irq_domain_ops *ops,
 					 void *host_data);
 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data);
 struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
@@ -134,7 +132,6 @@
 
 extern struct irq_domain *irq_find_host(struct device_node *node);
 extern void irq_set_default_host(struct irq_domain *host);
-extern void irq_set_virq_count(unsigned int count);
 
 static inline struct irq_domain *irq_domain_add_legacy_isa(
 				struct device_node *of_node,
@@ -146,7 +143,6 @@
 }
 extern struct irq_domain *irq_find_host(struct device_node *node);
 extern void irq_set_default_host(struct irq_domain *host);
-extern void irq_set_virq_count(unsigned int count);
 
 
 extern unsigned int irq_create_mapping(struct irq_domain *host,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a5375e7..645231c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -430,16 +430,10 @@
  * Most likely, you want to use tracing_on/tracing_off.
  */
 #ifdef CONFIG_RING_BUFFER
-void tracing_on(void);
-void tracing_off(void);
 /* trace_off_permanent stops recording with no way to bring it back */
 void tracing_off_permanent(void);
-int tracing_is_on(void);
 #else
-static inline void tracing_on(void) { }
-static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
-static inline int tracing_is_on(void) { return 0; }
 #endif
 
 enum ftrace_dump_mode {
@@ -449,6 +443,10 @@
 };
 
 #ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+
 extern void tracing_start(void);
 extern void tracing_stop(void);
 extern void ftrace_off_permanent(void);
@@ -533,6 +531,11 @@
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
 static inline void trace_dump_stack(void) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+
 static inline int
 trace_printk(const char *fmt, ...)
 {
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index fa39183..c4d2fc1 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -63,7 +63,8 @@
 	BP_HARDWARE_BREAKPOINT,
 	BP_WRITE_WATCHPOINT,
 	BP_READ_WATCHPOINT,
-	BP_ACCESS_WATCHPOINT
+	BP_ACCESS_WATCHPOINT,
+	BP_POKE_BREAKPOINT,
 };
 
 enum kgdb_bpstate {
@@ -207,8 +208,8 @@
 
 /* Optional functions. */
 extern int kgdb_validate_break_address(unsigned long addr);
-extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
-extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
+extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
+extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
 
 /**
  *	kgdb_arch_late - Perform any architecture specific initalization.
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 9efeae6..dd99c329 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -110,12 +110,29 @@
 
 extern struct ctl_table usermodehelper_table[];
 
+enum umh_disable_depth {
+	UMH_ENABLED = 0,
+	UMH_FREEZING,
+	UMH_DISABLED,
+};
+
 extern void usermodehelper_init(void);
 
-extern int usermodehelper_disable(void);
-extern void usermodehelper_enable(void);
-extern bool usermodehelper_is_disabled(void);
-extern void read_lock_usermodehelper(void);
-extern void read_unlock_usermodehelper(void);
+extern int __usermodehelper_disable(enum umh_disable_depth depth);
+extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
+
+static inline int usermodehelper_disable(void)
+{
+	return __usermodehelper_disable(UMH_DISABLED);
+}
+
+static inline void usermodehelper_enable(void)
+{
+	__usermodehelper_set_disable_depth(UMH_ENABLED);
+}
+
+extern int usermodehelper_read_trylock(void);
+extern long usermodehelper_read_lock_wait(long timeout);
+extern void usermodehelper_read_unlock(void);
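+
+/*
+ * Sketch of the intended pairing (not part of this patch): callers that must
+ * not race with helper freezing take the read side around the helper call:
+ *
+ *	ret = usermodehelper_read_trylock();
+ *	if (ret)
+ *		return ret;
+ *	...spawn the helper...
+ *	usermodehelper_read_unlock();
+ */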
 
 #endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/lp8727.h b/include/linux/lp8727.h
index d21fa28..ea98c61 100644
--- a/include/linux/lp8727.h
+++ b/include/linux/lp8727.h
@@ -1,4 +1,7 @@
 /*
+ * LP8727 Micro/Mini USB IC with integrated charger
+ *
+ *			Copyright (C) 2011 Texas Instruments
  *			Copyright (C) 2011 National Semiconductor
  *
  * This program is free software; you can redistribute it and/or modify
@@ -32,13 +35,24 @@
 	ICHG_1000mA,
 };
 
+/**
+ * struct lp8727_chg_param
+ * @eoc_level : end of charge level setting
+ * @ichg : charging current
+ */
 struct lp8727_chg_param {
-	/* end of charge level setting */
 	enum lp8727_eoc_level eoc_level;
-	/* charging current */
 	enum lp8727_ichg ichg;
 };
 
+/**
+ * struct lp8727_platform_data
+ * @get_batt_present : check battery status - exists or not
+ * @get_batt_level : get battery voltage (mV)
+ * @get_batt_capacity : get battery capacity (%)
+ * @get_batt_temp : get battery temperature
+ * @ac, @usb : charging parameters for each charger type
+ */
 struct lp8727_platform_data {
 	u8 (*get_batt_present)(void);
 	u16 (*get_batt_level)(void);
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index eab507f..fad48aa 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -22,6 +22,23 @@
 #include <linux/key.h>
 #include <linux/skbuff.h>
 
+struct lsm_network_audit {
+	int netif;
+	struct sock *sk;
+	u16 family;
+	__be16 dport;
+	__be16 sport;
+	union {
+		struct {
+			__be32 daddr;
+			__be32 saddr;
+		} v4;
+		struct {
+			struct in6_addr daddr;
+			struct in6_addr saddr;
+		} v6;
+	} fam;
+};
 
 /* Auxiliary data to use in generating the audit record. */
 struct common_audit_data {
@@ -41,23 +58,7 @@
 		struct path path;
 		struct dentry *dentry;
 		struct inode *inode;
-		struct {
-			int netif;
-			struct sock *sk;
-			u16 family;
-			__be16 dport;
-			__be16 sport;
-			union {
-				struct {
-					__be32 daddr;
-					__be32 saddr;
-				} v4;
-				struct {
-					struct in6_addr daddr;
-					struct in6_addr saddr;
-				} v6;
-			} fam;
-		} net;
+		struct lsm_network_audit *net;
 		int cap;
 		int ipc_id;
 		struct task_struct *tsk;
@@ -72,64 +73,15 @@
 	/* this union contains LSM specific data */
 	union {
 #ifdef CONFIG_SECURITY_SMACK
-		/* SMACK data */
-		struct smack_audit_data {
-			const char *function;
-			char *subject;
-			char *object;
-			char *request;
-			int result;
-		} smack_audit_data;
+		struct smack_audit_data *smack_audit_data;
 #endif
 #ifdef CONFIG_SECURITY_SELINUX
-		/* SELinux data */
-		struct {
-			u32 ssid;
-			u32 tsid;
-			u16 tclass;
-			u32 requested;
-			u32 audited;
-			u32 denied;
-			/*
-			 * auditdeny is a bit tricky and unintuitive.  See the
-			 * comments in avc.c for it's meaning and usage.
-			 */
-			u32 auditdeny;
-			struct av_decision *avd;
-			int result;
-		} selinux_audit_data;
+		struct selinux_audit_data *selinux_audit_data;
 #endif
 #ifdef CONFIG_SECURITY_APPARMOR
-		struct {
-			int error;
-			int op;
-			int type;
-			void *profile;
-			const char *name;
-			const char *info;
-			union {
-				void *target;
-				struct {
-					long pos;
-					void *target;
-				} iface;
-				struct {
-					int rlim;
-					unsigned long max;
-				} rlim;
-				struct {
-					const char *target;
-					u32 request;
-					u32 denied;
-					uid_t ouid;
-				} fs;
-			};
-		} apparmor_audit_data;
+		struct apparmor_audit_data *apparmor_audit_data;
 #endif
-	};
-	/* these callback will be implemented by a specific LSM */
-	void (*lsm_pre_audit)(struct audit_buffer *, void *);
-	void (*lsm_post_audit)(struct audit_buffer *, void *);
+	}; /* per LSM data pointer union */
 };
 
 #define v4info fam.v4
@@ -146,6 +98,8 @@
 	{ memset((_d), 0, sizeof(struct common_audit_data)); \
 	 (_d)->type = LSM_AUDIT_DATA_##_t; }
 
-void common_lsm_audit(struct common_audit_data *a);
+void common_lsm_audit(struct common_audit_data *a,
+	void (*pre_audit)(struct audit_buffer *, void *),
+	void (*post_audit)(struct audit_buffer *, void *));
 
 #endif
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 5fa6974..ee96cd5 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -146,6 +146,279 @@
 	u8 setting;
 };
 
+/* Battery driver related data */
+/*
+ * ADC for the battery thermistor.
+ * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
+ * with an NTC resistor to both identify the battery and to measure its
+ * temperature. Different phone manufacturers use different techniques to both
+ * identify the battery and to read its temperature.
+ */
+enum abx500_adc_therm {
+	ABx500_ADC_THERM_BATCTRL,
+	ABx500_ADC_THERM_BATTEMP,
+};
+
+/**
+ * struct abx500_res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combine the identification resistor with an
+ * NTC resistor.
+ * @temp:			battery pack temperature in Celsius
+ * @resist:			NTC resistor net total resistance
+ */
+struct abx500_res_to_temp {
+	int temp;
+	int resist;
+};
+
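+/*
+ * Example only (values invented for illustration): board code would supply an
+ * array of such points, e.g.
+ *
+ *	static const struct abx500_res_to_temp tbl[] = {
+ *		{ -20, 67400 }, { 0, 27100 }, { 25, 10000 }, { 60, 2480 },
+ *	};
+ *
+ * and the battery temperature code interpolates between neighbouring points.
+ */
+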
+/**
+ * struct abx500_v_to_cap - Table for translating voltage to capacity
+ * @voltage:		Voltage in mV
+ * @capacity:		Capacity in percent
+ */
+struct abx500_v_to_cap {
+	int voltage;
+	int capacity;
+};
+
+/* Forward declaration */
+struct abx500_fg;
+
+/**
+ * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer:	Time between measurements while recovering
+ * @recovery_total_time:	Total recovery time
+ * @init_timer:			Measurement interval during startup
+ * @init_discard_time:		Time we discard voltage measurement at startup
+ * @init_total_time:		Total init time during startup
+ * @high_curr_time:		Time current has to be high to go to recovery
+ * @accu_charging:		FG accumulation time while charging
+ * @accu_high_curr:		FG accumulation time in high current mode
+ * @high_curr_threshold:	High current threshold, in mA
+ * @lowbat_threshold:		Low battery threshold, in mV
+ * @overbat_threshold:		Over battery threshold, in mV
+ * @battok_falling_th_sel0:	Threshold in mV for battOk signal sel0
+ *				Resolution in 50 mV step.
+ * @battok_raising_th_sel1:	Threshold in mV for battOk signal sel1
+ *				Resolution in 50 mV step.
+ * @user_cap_limit:		Capacity reported from user must be within this
+ *				limit to be considered as sane, in percentage
+ *				points.
+ * @maint_thres:		This is the threshold where we stop reporting
+ *				battery full while in maintenance, in per cent
+ */
+struct abx500_fg_parameters {
+	int recovery_sleep_timer;
+	int recovery_total_time;
+	int init_timer;
+	int init_discard_time;
+	int init_total_time;
+	int high_curr_time;
+	int accu_charging;
+	int accu_high_curr;
+	int high_curr_threshold;
+	int lowbat_threshold;
+	int overbat_threshold;
+	int battok_falling_th_sel0;
+	int battok_raising_th_sel1;
+	int user_cap_limit;
+	int maint_thres;
+};
+
+/**
+ * struct abx500_maxim_parameters - charger maximization data from board config
+ * @ena_maxi:		Enable maximization for this battery type
+ * @chg_curr:		Maximum charger current allowed
+ * @wait_cycles:	cycles to wait before setting charger current
+ * @charger_curr_step:	delta between two charger current settings (mA)
+ */
+struct abx500_maxim_parameters {
+	bool ena_maxi;
+	int chg_curr;
+	int wait_cycles;
+	int charger_curr_step;
+};
+
+/**
+ * struct abx500_battery_type - different batteries supported
+ * @name:			battery technology
+ * @resis_high:			battery upper resistance limit
+ * @resis_low:			battery lower resistance limit
+ * @charge_full_design:		Maximum battery capacity in mAh
+ * @nominal_voltage:		Nominal voltage of the battery in mV
+ * @termination_vol:		max voltage up to which battery can be charged
+ * @termination_curr:		battery charging termination current in mA
+ * @recharge_vol:		battery voltage limit that will trigger a new
+ *				full charging cycle in the case where
+ *				maintenance charging has been disabled
+ * @normal_cur_lvl:		charger current in normal state in mA
+ * @normal_vol_lvl:		charger voltage in normal state in mV
+ * @maint_a_cur_lvl:		charger current in maintenance A state in mA
+ * @maint_a_vol_lvl:		charger voltage in maintenance A state in mV
+ * @maint_a_chg_timer_h:	charge time in maintenance A state
+ * @maint_b_cur_lvl:		charger current in maintenance B state in mA
+ * @maint_b_vol_lvl:		charger voltage in maintenance B state in mV
+ * @maint_b_chg_timer_h:	charge time in maintenance B state
+ * @low_high_cur_lvl:		charger current in temp low/high state in mA
+ * @low_high_vol_lvl:		charger voltage in temp low/high state in mV
+ * @battery_resistance:		battery inner resistance in mOhm.
+ * @n_temp_tbl_elements:	number of elements in r_to_t_tbl
+ * @r_to_t_tbl:			table containing resistance to temp points
+ * @n_v_cap_tbl_elements:	number of elements in v_to_cap_tbl
+ * @v_to_cap_tbl:		Voltage to capacity (in %) table
+ * @n_batres_tbl_elements:	number of elements in the batres_tbl
+ * @batres_tbl:			battery internal resistance vs temperature table
+ */
+struct abx500_battery_type {
+	int name;
+	int resis_high;
+	int resis_low;
+	int charge_full_design;
+	int nominal_voltage;
+	int termination_vol;
+	int termination_curr;
+	int recharge_vol;
+	int normal_cur_lvl;
+	int normal_vol_lvl;
+	int maint_a_cur_lvl;
+	int maint_a_vol_lvl;
+	int maint_a_chg_timer_h;
+	int maint_b_cur_lvl;
+	int maint_b_vol_lvl;
+	int maint_b_chg_timer_h;
+	int low_high_cur_lvl;
+	int low_high_vol_lvl;
+	int battery_resistance;
+	int n_temp_tbl_elements;
+	struct abx500_res_to_temp *r_to_t_tbl;
+	int n_v_cap_tbl_elements;
+	struct abx500_v_to_cap *v_to_cap_tbl;
+	int n_batres_tbl_elements;
+	struct batres_vs_temp *batres_tbl;
+};
+
+/**
+ * struct abx500_bm_capacity_levels - abx500 capacity level data
+ * @critical:		critical capacity level in percent
+ * @low:		low capacity level in percent
+ * @normal:		normal capacity level in percent
+ * @high:		high capacity level in percent
+ * @full:		full capacity level in percent
+ */
+struct abx500_bm_capacity_levels {
+	int critical;
+	int low;
+	int normal;
+	int high;
+	int full;
+};
+
+/**
+ * struct abx500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max:	maximum allowed USB charger voltage in mV
+ * @usb_curr_max:	maximum allowed USB charger current in mA
+ * @ac_volt_max:	maximum allowed AC charger voltage in mV
+ * @ac_curr_max:	maximum allowed AC charger current in mA
+ */
+struct abx500_bm_charger_parameters {
+	int usb_volt_max;
+	int usb_curr_max;
+	int ac_volt_max;
+	int ac_curr_max;
+};
+
+/**
+ * struct abx500_bm_data - abx500 battery management data
+ * @temp_under		under this temp, charging is stopped
+ * @temp_low		between this temp and temp_under charging is reduced
+ * @temp_high		between this temp and temp_over charging is reduced
+ * @temp_over		over this temp, charging is stopped
+ * @temp_now		present battery temperature
+ * @temp_interval_chg	temperature measurement interval in s when charging
+ * @temp_interval_nochg	temperature measurement interval in s when not charging
+ * @main_safety_tmr_h	safety timer for main charger
+ * @usb_safety_tmr_h	safety timer for usb charger
+ * @bkup_bat_v		voltage which we charge the backup battery with
+ * @bkup_bat_i		current which we charge the backup battery with
+ * @no_maintenance	indicates that maintenance charging is disabled
+ * @adc_therm		placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat	flag to enable charging of unknown batteries
+ * @enable_overshoot	flag to enable VBAT overshoot control
+ * @auto_trig		flag to enable auto adc trigger
+ * @fg_res		resistance of FG resistor in 0.1mOhm
+ * @n_btypes		number of elements in array bat_type
+ * @batt_id		index of the identified battery in array bat_type
+ * @interval_charging	charge alg cycle period time when charging (sec)
+ * @interval_not_charging charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis	temperature hysteresis
+ * @gnd_lift_resistance	Battery ground to phone ground resistance (mOhm)
+ * @maxi:		maximization parameters
+ * @cap_levels		capacity in percent for the different capacity levels
+ * @bat_type		table of supported battery types
+ * @chg_params		charger parameters
+ * @fg_params		fuel gauge parameters
+ */
+struct abx500_bm_data {
+	int temp_under;
+	int temp_low;
+	int temp_high;
+	int temp_over;
+	int temp_now;
+	int temp_interval_chg;
+	int temp_interval_nochg;
+	int main_safety_tmr_h;
+	int usb_safety_tmr_h;
+	int bkup_bat_v;
+	int bkup_bat_i;
+	bool no_maintenance;
+	bool chg_unknown_bat;
+	bool enable_overshoot;
+	bool auto_trig;
+	enum abx500_adc_therm adc_therm;
+	int fg_res;
+	int n_btypes;
+	int batt_id;
+	int interval_charging;
+	int interval_not_charging;
+	int temp_hysteresis;
+	int gnd_lift_resistance;
+	const struct abx500_maxim_parameters *maxi;
+	const struct abx500_bm_capacity_levels *cap_levels;
+	const struct abx500_battery_type *bat_type;
+	const struct abx500_bm_charger_parameters *chg_params;
+	const struct abx500_fg_parameters *fg_params;
+};
+
+struct abx500_chargalg_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+};
+
+struct abx500_charger_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+	bool autopower_cfg;
+};
+
+struct abx500_btemp_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+};
+
+struct abx500_fg_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+};
+
+struct abx500_bm_plat_data {
+	struct abx500_bm_data *battery;
+	struct abx500_charger_platform_data *charger;
+	struct abx500_btemp_platform_data *btemp;
+	struct abx500_fg_platform_data *fg;
+	struct abx500_chargalg_platform_data *chargalg;
+};
+
 int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
 	u8 value);
 int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
new file mode 100644
index 0000000..44310c9
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright ST-Ericsson 2012.
+ *
+ * Author: Arun Murthy <arun.murthy@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB8500_BM_H
+#define _AB8500_BM_H
+
+#include <linux/kernel.h>
+#include <linux/mfd/abx500.h>
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB8500_MAIN_WDOG_CTRL_REG	0x01
+#define AB8500_LOW_BAT_REG		0x03
+#define AB8500_BATT_OK_REG		0x04
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB8500_USB_LINE_STAT_REG	0x80
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_STATUS1_REG		0x00
+#define AB8500_CH_STATUS2_REG		0x01
+#define AB8500_CH_USBCH_STAT1_REG	0x02
+#define AB8500_CH_USBCH_STAT2_REG	0x03
+#define AB8500_CH_FSM_STAT_REG		0x04
+#define AB8500_CH_STAT_REG		0x05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_VOLT_LVL_REG		0x40
+#define AB8500_CH_VOLT_LVL_MAX_REG	0x41  /*Only in Cut2.0*/
+#define AB8500_CH_OPT_CRNTLVL_REG	0x42
+#define AB8500_CH_OPT_CRNTLVL_MAX_REG	0x43  /*Only in Cut2.0*/
+#define AB8500_CH_WD_TIMER_REG		0x50
+#define AB8500_CHARG_WD_CTRL		0x51
+#define AB8500_BTEMP_HIGH_TH		0x52
+#define AB8500_LED_INDICATOR_PWM_CTRL	0x53
+#define AB8500_LED_INDICATOR_PWM_DUTY	0x54
+#define AB8500_BATT_OVV			0x55
+#define AB8500_CHARGER_CTRL		0x56
+#define AB8500_BAT_CTRL_CURRENT_SOURCE	0x60  /*Only in Cut2.0*/
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_MCH_CTRL1		0x80
+#define AB8500_MCH_CTRL2		0x81
+#define AB8500_MCH_IPT_CURLVL_REG	0x82
+#define AB8500_CH_WD_REG		0x83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_USBCH_CTRL1_REG		0xC0
+#define AB8500_USBCH_CTRL2_REG		0xC1
+#define AB8500_USBCH_IPT_CRNTLVL_REG	0xC2
+
+/*
+ * Gas Gauge register offsets
+ * Bank : 0x0C
+ */
+#define AB8500_GASG_CC_CTRL_REG		0x00
+#define AB8500_GASG_CC_ACCU1_REG	0x01
+#define AB8500_GASG_CC_ACCU2_REG	0x02
+#define AB8500_GASG_CC_ACCU3_REG	0x03
+#define AB8500_GASG_CC_ACCU4_REG	0x04
+#define AB8500_GASG_CC_SMPL_CNTRL_REG	0x05
+#define AB8500_GASG_CC_SMPL_CNTRH_REG	0x06
+#define AB8500_GASG_CC_SMPL_CNVL_REG	0x07
+#define AB8500_GASG_CC_SMPL_CNVH_REG	0x08
+#define AB8500_GASG_CC_CNTR_AVGOFF_REG	0x09
+#define AB8500_GASG_CC_OFFSET_REG	0x0A
+#define AB8500_GASG_CC_NCOV_ACCU	0x10
+#define AB8500_GASG_CC_NCOV_ACCU_CTRL	0x11
+#define AB8500_GASG_CC_NCOV_ACCU_LOW	0x12
+#define AB8500_GASG_CC_NCOV_ACCU_MED	0x13
+#define AB8500_GASG_CC_NCOV_ACCU_HIGH	0x14
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB8500_IT_SOURCE2_REG		0x01
+#define AB8500_IT_SOURCE21_REG		0x14
+
+/*
+ * RTC register offsets
+ * Bank: 0x0F
+ */
+#define AB8500_RTC_BACKUP_CHG_REG	0x0C
+#define AB8500_RTC_CC_CONF_REG		0x01
+#define AB8500_RTC_CTRL_REG		0x0B
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_OTP_CONF_15		0x0E
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define ADC_RESOLUTION			1024
+#define ADC_CH_MAIN_MIN			0
+#define ADC_CH_MAIN_MAX			20030
+#define ADC_CH_VBUS_MIN			0
+#define ADC_CH_VBUS_MAX			20030
+#define ADC_CH_VBAT_MIN			2300
+#define ADC_CH_VBAT_MAX			4800
+#define ADC_CH_BKBAT_MIN		0
+#define ADC_CH_BKBAT_MAX		3200
+
+/* Main charge i/p current */
+#define MAIN_CH_IP_CUR_0P9A		0x80
+#define MAIN_CH_IP_CUR_1P0A		0x90
+#define MAIN_CH_IP_CUR_1P1A		0xA0
+#define MAIN_CH_IP_CUR_1P2A		0xB0
+#define MAIN_CH_IP_CUR_1P3A		0xC0
+#define MAIN_CH_IP_CUR_1P4A		0xD0
+#define MAIN_CH_IP_CUR_1P5A		0xE0
+
+/* ChVoltLevel */
+#define CH_VOL_LVL_3P5			0x00
+#define CH_VOL_LVL_4P0			0x14
+#define CH_VOL_LVL_4P05			0x16
+#define CH_VOL_LVL_4P1			0x1B
+#define CH_VOL_LVL_4P15			0x20
+#define CH_VOL_LVL_4P2			0x25
+#define CH_VOL_LVL_4P6			0x4D
+
+/* ChOutputCurrentLevel */
+#define CH_OP_CUR_LVL_0P1		0x00
+#define CH_OP_CUR_LVL_0P2		0x01
+#define CH_OP_CUR_LVL_0P3		0x02
+#define CH_OP_CUR_LVL_0P4		0x03
+#define CH_OP_CUR_LVL_0P5		0x04
+#define CH_OP_CUR_LVL_0P6		0x05
+#define CH_OP_CUR_LVL_0P7		0x06
+#define CH_OP_CUR_LVL_0P8		0x07
+#define CH_OP_CUR_LVL_0P9		0x08
+#define CH_OP_CUR_LVL_1P4		0x0D
+#define CH_OP_CUR_LVL_1P5		0x0E
+#define CH_OP_CUR_LVL_1P6		0x0F
+
+/* BTEMP High thermal limits */
+#define BTEMP_HIGH_TH_57_0		0x00
+#define BTEMP_HIGH_TH_52		0x01
+#define BTEMP_HIGH_TH_57_1		0x02
+#define BTEMP_HIGH_TH_62		0x03
+
+/* current is mA */
+#define USB_0P1A			100
+#define USB_0P2A			200
+#define USB_0P3A			300
+#define USB_0P4A			400
+#define USB_0P5A			500
+
+#define LOW_BAT_3P1V			0x20
+#define LOW_BAT_2P3V			0x00
+#define LOW_BAT_RESET			0x01
+#define LOW_BAT_ENABLE			0x01
+
+/* Backup battery constants */
+#define BUP_ICH_SEL_50UA		0x00
+#define BUP_ICH_SEL_150UA		0x04
+#define BUP_ICH_SEL_300UA		0x08
+#define BUP_ICH_SEL_700UA		0x0C
+
+#define BUP_VCH_SEL_2P5V		0x00
+#define BUP_VCH_SEL_2P6V		0x01
+#define BUP_VCH_SEL_2P8V		0x02
+#define BUP_VCH_SEL_3P1V		0x03
+
+/* Battery OVV constants */
+#define BATT_OVV_ENA			0x02
+#define BATT_OVV_TH_3P7			0x00
+#define BATT_OVV_TH_4P75		0x01
+
+/* A value to indicate over voltage */
+#define BATT_OVV_VALUE			4750
+
+/* VBUS OVV constants */
+#define VBUS_OVV_SELECT_MASK		0x78
+#define VBUS_OVV_SELECT_5P6V		0x00
+#define VBUS_OVV_SELECT_5P7V		0x08
+#define VBUS_OVV_SELECT_5P8V		0x10
+#define VBUS_OVV_SELECT_5P9V		0x18
+#define VBUS_OVV_SELECT_6P0V		0x20
+#define VBUS_OVV_SELECT_6P1V		0x28
+#define VBUS_OVV_SELECT_6P2V		0x30
+#define VBUS_OVV_SELECT_6P3V		0x38
+
+#define VBUS_AUTO_IN_CURR_LIM_ENA	0x04
+
+/* Fuel Gauge constants */
+#define RESET_ACCU			0x02
+#define READ_REQ			0x01
+#define CC_DEEP_SLEEP_ENA		0x02
+#define CC_PWR_UP_ENA			0x01
+#define CC_SAMPLES_40			0x28
+#define RD_NCONV_ACCU_REQ		0x01
+#define CC_CALIB			0x08
+#define CC_INTAVGOFFSET_ENA		0x10
+#define CC_MUXOFFSET			0x80
+#define CC_INT_CAL_N_AVG_MASK		0x60
+#define CC_INT_CAL_SAMPLES_16		0x40
+#define CC_INT_CAL_SAMPLES_8		0x20
+#define CC_INT_CAL_SAMPLES_4		0x00
+
+/* RTC constants */
+#define RTC_BUP_CH_ENA			0x10
+
+/* BatCtrl Current Source Constants */
+#define BAT_CTRL_7U_ENA			0x01
+#define BAT_CTRL_20U_ENA		0x02
+#define BAT_CTRL_CMP_ENA		0x04
+#define FORCE_BAT_CTRL_CMP_HIGH		0x08
+#define BAT_CTRL_PULL_UP_ENA		0x10
+
+/* Battery type */
+#define BATTERY_UNKNOWN			00
+
+/**
+ * struct res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combine the identification resistor with an
+ * NTC resistor.
+ * @temp:			battery pack temperature in Celsius
+ * @resist:			NTC resistor net total resistance
+ */
+struct res_to_temp {
+	int temp;
+	int resist;
+};
+
+/**
+ * struct batres_vs_temp - defines one point in a temp vs battery internal
+ * resistance curve.
+ * @temp:			battery pack temperature in Celsius
+ * @resist:			battery internal resistance in mOhm
+ */
+struct batres_vs_temp {
+	int temp;
+	int resist;
+};
+
+/* Forward declaration */
+struct ab8500_fg;
+
+/**
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer:	Time between measurements while recovering
+ * @recovery_total_time:	Total recovery time
+ * @init_timer:			Measurement interval during startup
+ * @init_discard_time:		Time we discard voltage measurement at startup
+ * @init_total_time:		Total init time during startup
+ * @high_curr_time:		Time current has to be high to go to recovery
+ * @accu_charging:		FG accumulation time while charging
+ * @accu_high_curr:		FG accumulation time in high current mode
+ * @high_curr_threshold:	High current threshold, in mA
+ * @lowbat_threshold:		Low battery threshold, in mV
+ * @battok_falling_th_sel0:	Threshold in mV for battOk signal sel0
+ *				Resolution in 50 mV step.
+ * @battok_raising_th_sel1:	Threshold in mV for battOk signal sel1
+ *				Resolution in 50 mV step.
+ * @user_cap_limit:		Capacity reported from user must be within this
+ *				limit to be considered as sane, in percentage
+ *				points.
+ * @maint_thres:		This is the threshold where we stop reporting
+ *				battery full while in maintenance, in per cent
+ */
+struct ab8500_fg_parameters {
+	int recovery_sleep_timer;
+	int recovery_total_time;
+	int init_timer;
+	int init_discard_time;
+	int init_total_time;
+	int high_curr_time;
+	int accu_charging;
+	int accu_high_curr;
+	int high_curr_threshold;
+	int lowbat_threshold;
+	int battok_falling_th_sel0;
+	int battok_raising_th_sel1;
+	int user_cap_limit;
+	int maint_thres;
+};
+
+/**
+ * struct ab8500_maxim_parameters - charger maximization data from board config
+ * @ena_maxi:		Enable maximization for this battery type
+ * @chg_curr:		Maximum charger current allowed
+ * @wait_cycles:	cycles to wait before setting charger current
+ * @charger_curr_step:	delta between two charger current settings (mA)
+ */
+struct ab8500_maxim_parameters {
+	bool ena_maxi;
+	int chg_curr;
+	int wait_cycles;
+	int charger_curr_step;
+};
+
+/**
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
+ * @critical:		critical capacity level in percent
+ * @low:		low capacity level in percent
+ * @normal:		normal capacity level in percent
+ * @high:		high capacity level in percent
+ * @full:		full capacity level in percent
+ */
+struct ab8500_bm_capacity_levels {
+	int critical;
+	int low;
+	int normal;
+	int high;
+	int full;
+};
+
+/**
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max:	maximum allowed USB charger voltage in mV
+ * @usb_curr_max:	maximum allowed USB charger current in mA
+ * @ac_volt_max:	maximum allowed AC charger voltage in mV
+ * @ac_curr_max:	maximum allowed AC charger current in mA
+ */
+struct ab8500_bm_charger_parameters {
+	int usb_volt_max;
+	int usb_curr_max;
+	int ac_volt_max;
+	int ac_curr_max;
+};
+
+/**
+ * struct ab8500_bm_data - ab8500 battery management data
+ * @temp_under		under this temp, charging is stopped
+ * @temp_low		between this temp and temp_under charging is reduced
+ * @temp_high		between this temp and temp_over charging is reduced
+ * @temp_over		over this temp, charging is stopped
+ * @temp_interval_chg	temperature measurement interval in s when charging
+ * @temp_interval_nochg	temperature measurement interval in s when not charging
+ * @main_safety_tmr_h	safety timer for main charger
+ * @usb_safety_tmr_h	safety timer for usb charger
+ * @bkup_bat_v		voltage which we charge the backup battery with
+ * @bkup_bat_i		current which we charge the backup battery with
+ * @no_maintenance	indicates that maintenance charging is disabled
+ * @adc_therm		placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat	flag to enable charging of unknown batteries
+ * @enable_overshoot	flag to enable VBAT overshoot control
+ * @fg_res		resistance of FG resistor in 0.1mOhm
+ * @n_btypes		number of elements in array bat_type
+ * @batt_id		index of the identified battery in array bat_type
+ * @interval_charging	charge alg cycle period time when charging (sec)
+ * @interval_not_charging charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis	temperature hysteresis
+ * @gnd_lift_resistance	Battery ground to phone ground resistance (mOhm)
+ * @maxi:		maximization parameters
+ * @cap_levels		capacity in percent for the different capacity levels
+ * @bat_type		table of supported battery types
+ * @chg_params		charger parameters
+ * @fg_params		fuel gauge parameters
+ */
+struct ab8500_bm_data {
+	int temp_under;
+	int temp_low;
+	int temp_high;
+	int temp_over;
+	int temp_interval_chg;
+	int temp_interval_nochg;
+	int main_safety_tmr_h;
+	int usb_safety_tmr_h;
+	int bkup_bat_v;
+	int bkup_bat_i;
+	bool no_maintenance;
+	bool chg_unknown_bat;
+	bool enable_overshoot;
+	enum abx500_adc_therm adc_therm;
+	int fg_res;
+	int n_btypes;
+	int batt_id;
+	int interval_charging;
+	int interval_not_charging;
+	int temp_hysteresis;
+	int gnd_lift_resistance;
+	const struct ab8500_maxim_parameters *maxi;
+	const struct ab8500_bm_capacity_levels *cap_levels;
+	const struct ab8500_bm_charger_parameters *chg_params;
+	const struct ab8500_fg_parameters *fg_params;
+};
+
+struct ab8500_charger_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+	bool autopower_cfg;
+};
+
+struct ab8500_btemp_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+};
+
+struct ab8500_fg_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+};
+
+struct ab8500_chargalg_platform_data {
+	char **supplied_to;
+	size_t num_supplicants;
+};
+struct ab8500_btemp;
+struct ab8500_gpadc;
+struct ab8500_fg;
+#ifdef CONFIG_AB8500_BM
+void ab8500_fg_reinit(void);
+void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
+struct ab8500_btemp *ab8500_btemp_get(void);
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
+struct ab8500_fg *ab8500_fg_get(void);
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
+int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
+
+#else
+static inline int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
+{
+	return -ENODEV;
+}
+static inline void ab8500_fg_reinit(void)
+{
+}
+static inline void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA)
+{
+}
+static inline struct ab8500_btemp *ab8500_btemp_get(void)
+{
+	return NULL;
+}
+static inline int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+	return 0;
+}
+static inline struct ab8500_fg *ab8500_fg_get(void)
+{
+	return NULL;
+}
+static inline int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev)
+{
+	return -ENODEV;
+}
+
+static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+{
+	return -ENODEV;
+}
+
+static inline int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
+{
+	return -ENODEV;
+}
+
+#endif
+#endif /* _AB8500_BM_H */
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
new file mode 100644
index 0000000..9b07725
--- /dev/null
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _UX500_CHARGALG_H
+#define _UX500_CHARGALG_H
+
+#include <linux/power_supply.h>
+
+#define psy_to_ux500_charger(x) container_of((x), \
+		struct ux500_charger, psy)
+
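+/*
+ * Illustrative use only: a charger driver embeds struct ux500_charger in its
+ * own state and recovers it from power_supply callbacks, e.g.
+ *
+ *	struct ux500_charger *uc = psy_to_ux500_charger(psy);
+ */
+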
+/* Forward declaration */
+struct ux500_charger;
+
+struct ux500_charger_ops {
+	int (*enable) (struct ux500_charger *, int, int, int);
+	int (*kick_wd) (struct ux500_charger *);
+	int (*update_curr) (struct ux500_charger *, int);
+};
+
+/**
+ * struct ux500_charger - power supply ux500 charger sub class
+ * @psy			power supply base class
+ * @ops			ux500 charger operations
+ * @max_out_volt	maximum output charger voltage in mV
+ * @max_out_curr	maximum output charger current in mA
+ */
+struct ux500_charger {
+	struct power_supply psy;
+	struct ux500_charger_ops ops;
+	int max_out_volt;
+	int max_out_curr;
+};
+
+#endif
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index c4eec22..650ef35 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -112,6 +112,11 @@
 #define NAND_BBT_USE_FLASH	0x00020000
 /* Do not store flash based bad block table in OOB area; store it in-band */
 #define NAND_BBT_NO_OOB		0x00040000
+/*
+ * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
+ * the entire spare area. Must be used with NAND_BBT_USE_FLASH.
+ */
+#define NAND_BBT_NO_OOB_BBM	0x00080000
 
 /*
  * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 1bbd9f2..ed270bd 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -47,6 +47,7 @@
 	struct request_queue *rq;
 	spinlock_t queue_lock;
 	void *priv;
+	fmode_t file_mode;
 };
 
 struct mtd_blktrans_ops {
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index 6987995..b200292 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -26,95 +26,83 @@
 #define FSMC_NAND_BW8		1
 #define FSMC_NAND_BW16		2
 
-/*
- * The placement of the Command Latch Enable (CLE) and
- * Address Latch Enable (ALE) is twisted around in the
- * SPEAR310 implementation.
- */
-#if defined(CONFIG_MACH_SPEAR310)
-#define PLAT_NAND_CLE		(1 << 17)
-#define PLAT_NAND_ALE		(1 << 16)
-#else
-#define PLAT_NAND_CLE		(1 << 16)
-#define PLAT_NAND_ALE		(1 << 17)
-#endif
-
 #define FSMC_MAX_NOR_BANKS	4
 #define FSMC_MAX_NAND_BANKS	4
 
 #define FSMC_FLASH_WIDTH8	1
 #define FSMC_FLASH_WIDTH16	2
 
-struct fsmc_nor_bank_regs {
-	uint32_t ctrl;
-	uint32_t ctrl_tim;
-};
+/* fsmc controller registers for NOR flash */
+#define CTRL			0x0
+	/* ctrl register definitions */
+	#define BANK_ENABLE		(1 << 0)
+	#define MUXED			(1 << 1)
+	#define NOR_DEV			(2 << 2)
+	#define WIDTH_8			(0 << 4)
+	#define WIDTH_16		(1 << 4)
+	#define RSTPWRDWN		(1 << 6)
+	#define WPROT			(1 << 7)
+	#define WRT_ENABLE		(1 << 12)
+	#define WAIT_ENB		(1 << 13)
 
-/* ctrl register definitions */
-#define BANK_ENABLE		(1 << 0)
-#define MUXED			(1 << 1)
-#define NOR_DEV			(2 << 2)
-#define WIDTH_8			(0 << 4)
-#define WIDTH_16		(1 << 4)
-#define RSTPWRDWN		(1 << 6)
-#define WPROT			(1 << 7)
-#define WRT_ENABLE		(1 << 12)
-#define WAIT_ENB		(1 << 13)
+#define CTRL_TIM		0x4
+	/* ctrl_tim register definitions */
 
-/* ctrl_tim register definitions */
-
-struct fsmc_nand_bank_regs {
-	uint32_t pc;
-	uint32_t sts;
-	uint32_t comm;
-	uint32_t attrib;
-	uint32_t ioata;
-	uint32_t ecc1;
-	uint32_t ecc2;
-	uint32_t ecc3;
-};
-
+#define FSMC_NOR_BANK_SZ	0x8
 #define FSMC_NOR_REG_SIZE	0x40
 
-struct fsmc_regs {
-	struct fsmc_nor_bank_regs nor_bank_regs[FSMC_MAX_NOR_BANKS];
-	uint8_t reserved_1[0x40 - 0x20];
-	struct fsmc_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
-	uint8_t reserved_2[0xfe0 - 0xc0];
-	uint32_t peripid0;			/* 0xfe0 */
-	uint32_t peripid1;			/* 0xfe4 */
-	uint32_t peripid2;			/* 0xfe8 */
-	uint32_t peripid3;			/* 0xfec */
-	uint32_t pcellid0;			/* 0xff0 */
-	uint32_t pcellid1;			/* 0xff4 */
-	uint32_t pcellid2;			/* 0xff8 */
-	uint32_t pcellid3;			/* 0xffc */
-};
+#define FSMC_NOR_REG(base, bank, reg)		(base + \
+						FSMC_NOR_BANK_SZ * (bank) + \
+						reg)
+
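+/*
+ * Example only: enabling writes on NOR bank 0 would look roughly like
+ *
+ *	writel(readl(FSMC_NOR_REG(base, 0, CTRL)) | WRT_ENABLE,
+ *	       FSMC_NOR_REG(base, 0, CTRL));
+ */
+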
+/* fsmc controller registers for NAND flash */
+#define PC			0x00
+	/* pc register definitions */
+	#define FSMC_RESET		(1 << 0)
+	#define FSMC_WAITON		(1 << 1)
+	#define FSMC_ENABLE		(1 << 2)
+	#define FSMC_DEVTYPE_NAND	(1 << 3)
+	#define FSMC_DEVWID_8		(0 << 4)
+	#define FSMC_DEVWID_16		(1 << 4)
+	#define FSMC_ECCEN		(1 << 6)
+	#define FSMC_ECCPLEN_512	(0 << 7)
+	#define FSMC_ECCPLEN_256	(1 << 7)
+	#define FSMC_TCLR_1		(1)
+	#define FSMC_TCLR_SHIFT		(9)
+	#define FSMC_TCLR_MASK		(0xF)
+	#define FSMC_TAR_1		(1)
+	#define FSMC_TAR_SHIFT		(13)
+	#define FSMC_TAR_MASK		(0xF)
+#define STS			0x04
+	/* sts register definitions */
+	#define FSMC_CODE_RDY		(1 << 15)
+#define COMM			0x08
+	/* comm register definitions */
+	#define FSMC_TSET_0		0
+	#define FSMC_TSET_SHIFT		0
+	#define FSMC_TSET_MASK		0xFF
+	#define FSMC_TWAIT_6		6
+	#define FSMC_TWAIT_SHIFT	8
+	#define FSMC_TWAIT_MASK		0xFF
+	#define FSMC_THOLD_4		4
+	#define FSMC_THOLD_SHIFT	16
+	#define FSMC_THOLD_MASK		0xFF
+	#define FSMC_THIZ_1		1
+	#define FSMC_THIZ_SHIFT		24
+	#define FSMC_THIZ_MASK		0xFF
+#define ATTRIB			0x0C
+#define IOATA			0x10
+#define ECC1			0x14
+#define ECC2			0x18
+#define ECC3			0x1C
+#define FSMC_NAND_BANK_SZ	0x20
+
+#define FSMC_NAND_REG(base, bank, reg)		(base + FSMC_NOR_REG_SIZE + \
+						(FSMC_NAND_BANK_SZ * (bank)) + \
+						reg)
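+
+/*
+ * Example only: the NAND driver is expected to address per-bank registers
+ * through this helper, e.g.
+ *
+ *	writel(FSMC_ENABLE | FSMC_DEVTYPE_NAND, FSMC_NAND_REG(base, bank, PC));
+ */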
 
 #define FSMC_BUSY_WAIT_TIMEOUT	(1 * HZ)
 
-/* pc register definitions */
-#define FSMC_RESET		(1 << 0)
-#define FSMC_WAITON		(1 << 1)
-#define FSMC_ENABLE		(1 << 2)
-#define FSMC_DEVTYPE_NAND	(1 << 3)
-#define FSMC_DEVWID_8		(0 << 4)
-#define FSMC_DEVWID_16		(1 << 4)
-#define FSMC_ECCEN		(1 << 6)
-#define FSMC_ECCPLEN_512	(0 << 7)
-#define FSMC_ECCPLEN_256	(1 << 7)
-#define FSMC_TCLR_1		(1 << 9)
-#define FSMC_TAR_1		(1 << 13)
-
-/* sts register definitions */
-#define FSMC_CODE_RDY		(1 << 15)
-
-/* comm register definitions */
-#define FSMC_TSET_0		(0 << 0)
-#define FSMC_TWAIT_6		(6 << 8)
-#define FSMC_THOLD_4		(4 << 16)
-#define FSMC_THIZ_1		(1 << 24)
-
 /*
  * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
  * and it has to be read consecutively and immediately after the 512
@@ -133,6 +121,20 @@
 	struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
 };
 
+struct fsmc_nand_timings {
+	uint8_t tclr;
+	uint8_t tar;
+	uint8_t thiz;
+	uint8_t thold;
+	uint8_t twait;
+	uint8_t tset;
+};
+
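+/*
+ * Sketch only: these timings are meant to be packed into the COMM/ATTRIB/PC
+ * registers using the *_SHIFT/*_MASK definitions above, for instance
+ *
+ *	value = ((tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT) |
+ *		((tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT);
+ */
+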
+enum access_mode {
+	USE_DMA_ACCESS = 1,
+	USE_WORD_ACCESS,
+};
+
 /**
  * fsmc_nand_platform_data - platform specific NAND controller config
  * @partitions: partition table for the platform, use a default fallback
@@ -146,12 +148,23 @@
  * this may be set to NULL
  */
 struct fsmc_nand_platform_data {
+	struct fsmc_nand_timings *nand_timings;
 	struct mtd_partition	*partitions;
 	unsigned int		nr_partitions;
 	unsigned int		options;
 	unsigned int		width;
 	unsigned int		bank;
+
+	/* CLE, ALE offsets */
+	unsigned int		cle_off;
+	unsigned int		ale_off;
+	enum access_mode	mode;
+
 	void			(*select_bank)(uint32_t bank, uint32_t busw);
+
+	/* priv structures for dma accesses */
+	void			*read_dma_priv;
+	void			*write_dma_priv;
 };
 
 extern int __init fsmc_nor_init(struct platform_device *pdev,
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index d43dc25..cf5ea8c 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -164,6 +164,9 @@
 	/* ECC layout structure pointer - read only! */
 	struct nand_ecclayout *ecclayout;
 
+	/* max number of correctable bit errors per writesize */
+	unsigned int ecc_strength;
+
 	/* Data for variable erase regions. If numeraseregions is zero,
 	 * it means that the whole device has erasesize as given above.
 	 */
@@ -174,52 +177,52 @@
 	 * Do not call via these pointers, use corresponding mtd_*()
 	 * wrappers instead.
 	 */
-	int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
-	int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
-		      size_t *retlen, void **virt, resource_size_t *phys);
-	void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
-	unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
-					    unsigned long len,
-					    unsigned long offset,
-					    unsigned long flags);
-	int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
-		     size_t *retlen, u_char *buf);
-	int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
-		      size_t *retlen, const u_char *buf);
-	int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
-			    size_t *retlen, const u_char *buf);
-	int (*read_oob) (struct mtd_info *mtd, loff_t from,
-			 struct mtd_oob_ops *ops);
-	int (*write_oob) (struct mtd_info *mtd, loff_t to,
+	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
+	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
+		       size_t *retlen, void **virt, resource_size_t *phys);
+	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
+	unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
+					     unsigned long len,
+					     unsigned long offset,
+					     unsigned long flags);
+	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
+		      size_t *retlen, u_char *buf);
+	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
+		       size_t *retlen, const u_char *buf);
+	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+			     size_t *retlen, const u_char *buf);
+	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
 			  struct mtd_oob_ops *ops);
-	int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
-				   size_t len);
-	int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
-				   size_t len, size_t *retlen, u_char *buf);
-	int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
-				   size_t len);
-	int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
-				   size_t len, size_t *retlen, u_char *buf);
-	int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
-				    size_t *retlen, u_char *buf);
-	int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
-				   size_t len);
-	int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
+	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
+			   struct mtd_oob_ops *ops);
+	int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+				    size_t len);
+	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+				    size_t len, size_t *retlen, u_char *buf);
+	int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+				    size_t len);
+	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+				    size_t len, size_t *retlen, u_char *buf);
+	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
+				     size_t len, size_t *retlen, u_char *buf);
+	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+				    size_t len);
+	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
 			unsigned long count, loff_t to, size_t *retlen);
-	void (*sync) (struct mtd_info *mtd);
-	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-	int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-	int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
-	int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
-	int (*suspend) (struct mtd_info *mtd);
-	void (*resume) (struct mtd_info *mtd);
+	void (*_sync) (struct mtd_info *mtd);
+	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*_suspend) (struct mtd_info *mtd);
+	void (*_resume) (struct mtd_info *mtd);
 	/*
 	 * If the driver is something smart, like UBI, it may need to maintain
 	 * its own reference counting. The below functions are only for driver.
 	 */
-	int (*get_device) (struct mtd_info *mtd);
-	void (*put_device) (struct mtd_info *mtd);
+	int (*_get_device) (struct mtd_info *mtd);
+	void (*_put_device) (struct mtd_info *mtd);
 
 	/* Backing device capabilities for this device
 	 * - provides mmap capabilities
@@ -240,214 +243,75 @@
 	int usecount;
 };
 
-/*
- * Erase is an asynchronous operation.  Device drivers are supposed
- * to call instr->callback() whenever the operation completes, even
- * if it completes with a failure.
- * Callers are supposed to pass a callback function and wait for it
- * to be called before writing to the block.
- */
-static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	return mtd->erase(mtd, instr);
-}
-
-/*
- * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
- */
-static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
-			    size_t *retlen, void **virt, resource_size_t *phys)
-{
-	*retlen = 0;
-	if (!mtd->point)
-		return -EOPNOTSUPP;
-	return mtd->point(mtd, from, len, retlen, virt, phys);
-}
-
-/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
-static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
-	return mtd->unpoint(mtd, from, len);
-}
-
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
-						  unsigned long len,
-						  unsigned long offset,
-						  unsigned long flags)
-{
-	if (!mtd->get_unmapped_area)
-		return -EOPNOTSUPP;
-	return mtd->get_unmapped_area(mtd, len, offset, flags);
-}
-
-static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
-			   size_t *retlen, u_char *buf)
-{
-	return mtd->read(mtd, from, len, retlen, buf);
-}
-
-static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
-			    size_t *retlen, const u_char *buf)
-{
-	*retlen = 0;
-	if (!mtd->write)
-		return -EROFS;
-	return mtd->write(mtd, to, len, retlen, buf);
-}
-
-/*
- * In blackbox flight recorder like scenarios we want to make successful writes
- * in interrupt context. panic_write() is only intended to be called when its
- * known the kernel is about to panic and we need the write to succeed. Since
- * the kernel is not going to be running for much longer, this function can
- * break locks and delay to ensure the write succeeds (but not sleep).
- */
-static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
-				  size_t *retlen, const u_char *buf)
-{
-	*retlen = 0;
-	if (!mtd->panic_write)
-		return -EOPNOTSUPP;
-	return mtd->panic_write(mtd, to, len, retlen, buf);
-}
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	      void **virt, resource_size_t *phys);
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+				    unsigned long offset, unsigned long flags);
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	     u_char *buf);
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+	      const u_char *buf);
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+		    const u_char *buf);
 
 static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
 			       struct mtd_oob_ops *ops)
 {
 	ops->retlen = ops->oobretlen = 0;
-	if (!mtd->read_oob)
+	if (!mtd->_read_oob)
 		return -EOPNOTSUPP;
-	return mtd->read_oob(mtd, from, ops);
+	return mtd->_read_oob(mtd, from, ops);
 }
 
 static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
 				struct mtd_oob_ops *ops)
 {
 	ops->retlen = ops->oobretlen = 0;
-	if (!mtd->write_oob)
+	if (!mtd->_write_oob)
 		return -EOPNOTSUPP;
-	return mtd->write_oob(mtd, to, ops);
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	return mtd->_write_oob(mtd, to, ops);
 }
 
-/*
- * Method to access the protection register area, present in some flash
- * devices. The user data is one time programmable but the factory data is read
- * only.
- */
-static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
-					 struct otp_info *buf, size_t len)
-{
-	if (!mtd->get_fact_prot_info)
-		return -EOPNOTSUPP;
-	return mtd->get_fact_prot_info(mtd, buf, len);
-}
-
-static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
-					 size_t len, size_t *retlen,
-					 u_char *buf)
-{
-	*retlen = 0;
-	if (!mtd->read_fact_prot_reg)
-		return -EOPNOTSUPP;
-	return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
-}
-
-static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
-					 struct otp_info *buf,
-					 size_t len)
-{
-	if (!mtd->get_user_prot_info)
-		return -EOPNOTSUPP;
-	return mtd->get_user_prot_info(mtd, buf, len);
-}
-
-static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
-					 size_t len, size_t *retlen,
-					 u_char *buf)
-{
-	*retlen = 0;
-	if (!mtd->read_user_prot_reg)
-		return -EOPNOTSUPP;
-	return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
-}
-
-static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
-					  size_t len, size_t *retlen,
-					  u_char *buf)
-{
-	*retlen = 0;
-	if (!mtd->write_user_prot_reg)
-		return -EOPNOTSUPP;
-	return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
-}
-
-static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
-					 size_t len)
-{
-	if (!mtd->lock_user_prot_reg)
-		return -EOPNOTSUPP;
-	return mtd->lock_user_prot_reg(mtd, from, len);
-}
+int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+			   size_t len);
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf);
+int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+			   size_t len);
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf);
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, u_char *buf);
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
 
 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
 	       unsigned long count, loff_t to, size_t *retlen);
 
 static inline void mtd_sync(struct mtd_info *mtd)
 {
-	if (mtd->sync)
-		mtd->sync(mtd);
+	if (mtd->_sync)
+		mtd->_sync(mtd);
 }
 
-/* Chip-supported device locking */
-static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	if (!mtd->lock)
-		return -EOPNOTSUPP;
-	return mtd->lock(mtd, ofs, len);
-}
-
-static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	if (!mtd->unlock)
-		return -EOPNOTSUPP;
-	return mtd->unlock(mtd, ofs, len);
-}
-
-static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	if (!mtd->is_locked)
-		return -EOPNOTSUPP;
-	return mtd->is_locked(mtd, ofs, len);
-}
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
 
 static inline int mtd_suspend(struct mtd_info *mtd)
 {
-	return mtd->suspend ? mtd->suspend(mtd) : 0;
+	return mtd->_suspend ? mtd->_suspend(mtd) : 0;
 }
 
 static inline void mtd_resume(struct mtd_info *mtd)
 {
-	if (mtd->resume)
-		mtd->resume(mtd);
-}
-
-static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
-	if (!mtd->block_isbad)
-		return 0;
-	return mtd->block_isbad(mtd, ofs);
-}
-
-static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	if (!mtd->block_markbad)
-		return -EOPNOTSUPP;
-	return mtd->block_markbad(mtd, ofs);
+	if (mtd->_resume)
+		mtd->_resume(mtd);
 }
 
 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -482,12 +346,12 @@
 
 static inline int mtd_has_oob(const struct mtd_info *mtd)
 {
-	return mtd->read_oob && mtd->write_oob;
+	return mtd->_read_oob && mtd->_write_oob;
 }
 
 static inline int mtd_can_have_bb(const struct mtd_info *mtd)
 {
-	return !!mtd->block_isbad;
+	return !!mtd->_block_isbad;
 }
 
 	/* Kernel-side ioctl definitions */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 63b5a8b..1482340 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,6 +324,7 @@
  * @steps:	number of ECC steps per page
  * @size:	data bytes per ECC step
  * @bytes:	ECC bytes per step
+ * @strength:	max number of correctable bits per ECC step
  * @total:	total number of ECC bytes per page
  * @prepad:	padding information for syndrome based ECC generators
  * @postpad:	padding information for syndrome based ECC generators
@@ -351,6 +352,7 @@
 	int size;
 	int bytes;
 	int total;
+	int strength;
 	int prepad;
 	int postpad;
 	struct nand_ecclayout	*layout;
@@ -448,8 +450,9 @@
  *			will be copied to the appropriate nand_bbt_descr's.
  * @badblockpos:	[INTERN] position of the bad block marker in the oob
  *			area.
- * @badblockbits:	[INTERN] number of bits to left-shift the bad block
- *			number
+ * @badblockbits:	[INTERN] minimum number of set bits in a good block's
+ *			bad block marker position; i.e., BBM == 11110111b is
+ *			not bad when badblockbits == 7
  * @cellinfo:		[INTERN] MLC/multichip data from chip ident
  * @numchips:		[INTERN] number of physical chips
  * @chipsize:		[INTERN] the size of one chip for multichip arrays
diff --git a/include/linux/mtd/pmc551.h b/include/linux/mtd/pmc551.h
deleted file mode 100644
index 27ad40a..0000000
--- a/include/linux/mtd/pmc551.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * PMC551 PCI Mezzanine Ram Device
- *
- * Author:
- *       Mark Ferrell
- *       Copyright 1999,2000 Nortel Networks
- *
- * License:
- *	 As part of this driver was derrived from the slram.c driver it falls
- *	 under the same license, which is GNU General Public License v2
- */
-
-#ifndef __MTD_PMC551_H__
-#define __MTD_PMC551_H__
-
-#include <linux/mtd/mtd.h>
-
-#define PMC551_VERSION \
-       "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
-
-/*
- * Our personal and private information
- */
-struct mypriv {
-        struct pci_dev *dev;
-        u_char *start;
-        u32    base_map0;
-        u32    curr_map0;
-        u32    asize;
-	struct mtd_info *nextpmc551;
-};
-
-/*
- * Function Prototypes
- */
-static int pmc551_erase(struct mtd_info *, struct erase_info *);
-static void pmc551_unpoint(struct mtd_info *, loff_t, size_t);
-static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
-		size_t *retlen, void **virt, resource_size_t *phys);
-static int pmc551_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
-
-
-/*
- * Define the PCI ID's if the kernel doesn't define them for us
- */
-#ifndef PCI_VENDOR_ID_V3_SEMI
-#define PCI_VENDOR_ID_V3_SEMI             0x11b0
-#endif
-
-#ifndef PCI_DEVICE_ID_V3_SEMI_V370PDC
-#define PCI_DEVICE_ID_V3_SEMI_V370PDC     0x0200
-#endif
-
-
-#define PMC551_PCI_MEM_MAP0	0x50
-#define PMC551_PCI_MEM_MAP1	0x54
-#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK	0x3ff00000
-#define PMC551_PCI_MEM_MAP_APERTURE_MASK	0x000000f0
-#define PMC551_PCI_MEM_MAP_REG_EN		0x00000002
-#define PMC551_PCI_MEM_MAP_ENABLE		0x00000001
-
-#define PMC551_SDRAM_MA		0x60
-#define PMC551_SDRAM_CMD	0x62
-#define PMC551_DRAM_CFG		0x64
-#define PMC551_SYS_CTRL_REG	0x78
-
-#define PMC551_DRAM_BLK0	0x68
-#define PMC551_DRAM_BLK1	0x6c
-#define PMC551_DRAM_BLK2	0x70
-#define PMC551_DRAM_BLK3	0x74
-#define PMC551_DRAM_BLK_GET_SIZE(x) (524288<<((x>>4)&0x0f))
-#define PMC551_DRAM_BLK_SET_COL_MUX(x,v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
-#define PMC551_DRAM_BLK_SET_ROW_MUX(x,v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
-
-
-#endif /* __MTD_PMC551_H__ */
-
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 9cf4c4c..a38e1fa 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -23,6 +23,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/pm_qos.h>
 
 /* FLCTL registers */
 #define FLCMNCR(f)		(f->reg + 0x0)
@@ -38,6 +39,7 @@
 #define FLDTFIFO(f)		(f->reg + 0x24)
 #define FLECFIFO(f)		(f->reg + 0x28)
 #define FLTRCR(f)		(f->reg + 0x2C)
+#define FLHOLDCR(f)		(f->reg + 0x38)
 #define	FL4ECCRESULT0(f)	(f->reg + 0x80)
 #define	FL4ECCRESULT1(f)	(f->reg + 0x84)
 #define	FL4ECCRESULT2(f)	(f->reg + 0x88)
@@ -67,6 +69,30 @@
 #define	CE0_ENABLE	(0x1 << 3)	/* Chip Enable 0 */
 #define	TYPESEL_SET	(0x1 << 0)
 
+/*
+ * Clock settings using the PULSEx registers from FLCMNCR
+ *
+ * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E
+ * to control the clock divider used between the High-Speed Peripheral Clock
+ * and the FLCTL internal clock. If so, use CLK_8B_xxx for connecting 8 bit
+ * and CLK_16B_xxx for connecting 16 bit bus bandwidth NAND chips. For the 16
+ * bit version the divider is separate for the pulse width of high and low
+ * signals.
+ */
+#define PULSE3	(0x1 << 27)
+#define PULSE2	(0x1 << 17)
+#define PULSE1	(0x1 << 15)
+#define PULSE0	(0x1 << 9)
+#define CLK_8B_0_5			PULSE1
+#define CLK_8B_1			0x0
+#define CLK_8B_1_5			(PULSE1 | PULSE2)
+#define CLK_8B_2			PULSE0
+#define CLK_8B_3			(PULSE0 | PULSE1 | PULSE2)
+#define CLK_8B_4			(PULSE0 | PULSE2)
+#define CLK_16B_6L_2H			PULSE0
+#define CLK_16B_9L_3H			(PULSE0 | PULSE1 | PULSE2)
+#define CLK_16B_12L_4H			(PULSE0 | PULSE2)
+
 /* FLCMDCR control bits */
 #define ADRCNT2_E	(0x1 << 31)	/* 5byte address enable */
 #define ADRMD_E		(0x1 << 26)	/* Sector address access */
@@ -85,6 +111,15 @@
 #define TRSTRT		(0x1 << 0)	/* translation start */
 #define TREND		(0x1 << 1)	/* translation end */
 
+/*
+ * FLHOLDCR control bits
+ *
+ * HOLDEN: Bus Occupancy Enable (inverted)
+ * Enable this bit when the external bus might be used in between transfers.
+ * If not set and the bus gets used by other modules, a deadlock occurs.
+ */
+#define HOLDEN		(0x1 << 0)
+
 /* FL4ECCCR control bits */
 #define	_4ECCFA		(0x1 << 2)	/* 4 symbols correct fault */
 #define	_4ECCEND	(0x1 << 1)	/* 4 symbols end */
@@ -97,6 +132,7 @@
 	struct mtd_info		mtd;
 	struct nand_chip	chip;
 	struct platform_device	*pdev;
+	struct dev_pm_qos_request pm_qos;
 	void __iomem		*reg;
 
 	uint8_t	done_buff[2048 + 64];	/* max size 2048 + 64 */
@@ -108,11 +144,14 @@
 	int	erase1_page_addr;	/* page_addr in ERASE1 cmd */
 	uint32_t erase_ADRCNT;		/* bits of FLCMDCR in ERASE1 cmd */
 	uint32_t rw_ADRCNT;	/* bits of FLCMDCR in READ WRITE cmd */
+	uint32_t flcmncr_base;	/* base value of FLCMNCR */
 
 	int	hwecc_cant_correct[4];
 
 	unsigned page_size:1;	/* NAND page size (0 = 512, 1 = 2048) */
 	unsigned hwecc:1;	/* Hardware ECC (0 = disabled, 1 = enabled) */
+	unsigned holden:1;	/* Hardware has FLHOLDCR and HOLDEN is set */
+	unsigned qos_request:1;	/* QoS request to prevent deep power shutdown */
 };
 
 struct sh_flctl_platform_data {
@@ -121,6 +160,7 @@
 	unsigned long		flcmncr_val;
 
 	unsigned has_hwecc:1;
+	unsigned use_holden:1;
 };
 
 static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
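A hedged board-file sketch (not part of this patch) showing how the new PULSEx clock macros and the use_holden flag might be wired up; the particular flag combination is an example only:

	static struct sh_flctl_platform_data flctl_pdata = {
		.flcmncr_val	= CLK_8B_1 | CE0_ENABLE | TYPESEL_SET,
		.has_hwecc	= 1,
		.use_holden	= 1,	/* hardware has FLHOLDCR */
	};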
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
new file mode 100644
index 0000000..8ae1726
--- /dev/null
+++ b/include/linux/mtd/spear_smi.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2010 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MTD_SPEAR_SMI_H
+#define __MTD_SPEAR_SMI_H
+
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+/* max possible slots for serial-nor flash chip in the SMI controller */
+#define MAX_NUM_FLASH_CHIP	4
+
+/* macro to define partitions for flash devices */
+#define DEFINE_PARTS(n, of, s)		\
+{					\
+	.name = n,			\
+	.offset = of,			\
+	.size = s,			\
+}
+
+/**
+ * struct spear_smi_flash_info - platform structure for passing flash
+ * information
+ *
+ * name: name of the serial nor flash for identification
+ * mem_base: the memory base on which the flash is mapped
+ * size: size of the flash in bytes
+ * partitions: partition details
+ * nr_partitions: number of partitions
+ * fast_mode: whether flash supports fast mode
+ */
+
+struct spear_smi_flash_info {
+	char *name;
+	unsigned long mem_base;
+	unsigned long size;
+	struct mtd_partition *partitions;
+	int nr_partitions;
+	u8 fast_mode;
+};
+
+/**
+ * struct spear_smi_plat_data - platform structure for configuring smi
+ *
+ * clk_rate: clk rate at which SMI must operate
+ * num_flashes: number of flashes present on board
+ * board_flash_info: specific details of each flash present on board
+ */
+struct spear_smi_plat_data {
+	unsigned long clk_rate;
+	int num_flashes;
+	struct spear_smi_flash_info *board_flash_info;
+	struct device_node *np[MAX_NUM_FLASH_CHIP];
+};
+
+#endif /* __MTD_SPEAR_SMI_H */
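Illustrative board data for the new header (a sketch, not taken from this patch); the flash name, base address, sizes and partition layout are invented for the example:

	static struct mtd_partition smi_parts[] = {
		DEFINE_PARTS("xloader", 0, 0x40000),
		DEFINE_PARTS("rootfs", 0x40000, MTDPART_SIZ_FULL),
	};

	static struct spear_smi_flash_info smi_flash_info[] = {
		{
			.name		= "m25p64",
			.mem_base	= 0xf8000000,	/* example mapping */
			.size		= 0x800000,
			.partitions	= smi_parts,
			.nr_partitions	= ARRAY_SIZE(smi_parts),
			.fast_mode	= 1,
		},
	};

	static struct spear_smi_plat_data smi_plat_data = {
		.clk_rate	  = 50000000,	/* 50 MHz, example */
		.num_flashes	  = ARRAY_SIZE(smi_flash_info),
		.board_flash_info = smi_flash_info,
	};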
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
index 8f82575..18543e2 100644
--- a/include/linux/mtio.h
+++ b/include/linux/mtio.h
@@ -194,6 +194,7 @@
 #define MT_ST_SYSV              0x1000
 #define MT_ST_NOWAIT            0x2000
 #define MT_ST_SILI		0x4000
+#define MT_ST_NOWAIT_EOF	0x8000
 
 /* The mode parameters to be controlled. Parameter chosen with bits 20-28 */
 #define MT_ST_CLEAR_DEFAULT	0xfffff
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1f77540..5cbaa20 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2604,8 +2604,6 @@
 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
-extern int dev_seq_open_ops(struct inode *inode, struct file *file,
-			    const struct seq_operations *ops);
 #endif
 
 extern int netdev_class_create_file(struct class_attribute *class_attr);
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
index c0405ac..e3a9978 100644
--- a/include/linux/netfilter/xt_set.h
+++ b/include/linux/netfilter/xt_set.h
@@ -58,8 +58,8 @@
 struct xt_set_info_target_v2 {
 	struct xt_set_info add_set;
 	struct xt_set_info del_set;
-	u32 flags;
-	u32 timeout;
+	__u32 flags;
+	__u32 timeout;
 };
 
 #endif /*_XT_SET_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index f549adc..1bc898b 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -287,7 +287,17 @@
 				  struct xt_table *table);
 
 /* Check for an extension */
-extern int ip6t_ext_hdr(u8 nexthdr);
+static inline int
+ip6t_ext_hdr(u8 nexthdr)
+{	return (nexthdr == IPPROTO_HOPOPTS) ||
+	       (nexthdr == IPPROTO_ROUTING) ||
+	       (nexthdr == IPPROTO_FRAGMENT) ||
+	       (nexthdr == IPPROTO_ESP) ||
+	       (nexthdr == IPPROTO_AH) ||
+	       (nexthdr == IPPROTO_NONE) ||
+	       (nexthdr == IPPROTO_DSTOPTS);
+}
+
 /* find specified header and get offset to it */
 extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
 			 int target, unsigned short *fragoff);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index bd9f55a..ddbb6a9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -299,18 +299,31 @@
 	/*
 	 * Bits needed to read the hw events in user-space.
 	 *
-	 *   u32 seq;
-	 *   s64 count;
+	 *   u32 seq, time_mult, time_shift, idx, width;
+	 *   u64 count, enabled, running;
+	 *   u64 cyc, time_offset;
+	 *   s64 pmc = 0;
 	 *
 	 *   do {
 	 *     seq = pc->lock;
-	 *
 	 *     barrier()
-	 *     if (pc->index) {
-	 *       count = pmc_read(pc->index - 1);
-	 *       count += pc->offset;
-	 *     } else
-	 *       goto regular_read;
+	 *
+	 *     enabled = pc->time_enabled;
+	 *     running = pc->time_running;
+	 *
+	 *     if (pc->cap_usr_time && enabled != running) {
+	 *       cyc = rdtsc();
+	 *       time_offset = pc->time_offset;
+	 *       time_mult   = pc->time_mult;
+	 *       time_shift  = pc->time_shift;
+	 *     }
+	 *
+	 *     idx = pc->index;
+	 *     count = pc->offset;
+	 *     if (pc->cap_usr_rdpmc && idx) {
+	 *       width = pc->pmc_width;
+	 *       pmc = rdpmc(idx - 1);
+	 *     }
 	 *
 	 *     barrier();
 	 *   } while (pc->lock != seq);
@@ -323,14 +336,57 @@
 	__s64	offset;			/* add to hardware event value */
 	__u64	time_enabled;		/* time event active */
 	__u64	time_running;		/* time event on cpu */
-	__u32	time_mult, time_shift;
+	union {
+		__u64	capabilities;
+		__u64	cap_usr_time  : 1,
+			cap_usr_rdpmc : 1,
+			cap_____res   : 62;
+	};
+
+	/*
+	 * If cap_usr_rdpmc is set, this field provides the bit-width of the
+	 * value read using the rdpmc() or equivalent instruction. This can be
+	 * used to sign extend the result like:
+	 *
+	 *   pmc <<= 64 - width;
+	 *   pmc >>= 64 - width; // signed shift right
+	 *   count += pmc;
+	 */
+	__u16	pmc_width;
+
+	/*
+	 * If cap_usr_time is set, the fields below can be used to compute the
+	 * time delta since time_enabled (in ns) using rdtsc or similar.
+	 *
+	 *   u64 quot, rem;
+	 *   u64 delta;
+	 *
+	 *   quot = (cyc >> time_shift);
+	 *   rem = cyc & ((1 << time_shift) - 1);
+	 *   delta = time_offset + quot * time_mult +
+	 *              ((rem * time_mult) >> time_shift);
+	 *
+	 * Where time_offset, time_mult, time_shift and cyc are read in the
+	 * seqcount loop described above. This delta can then be added to
+	 * enabled and possibly running (if idx), improving the scaling:
+	 *
+	 *   enabled += delta;
+	 *   if (idx)
+	 *     running += delta;
+	 *
+	 *   quot = count / running;
+	 *   rem  = count % running;
+	 *   count = quot * enabled + (rem * enabled) / running;
+	 */
+	__u16	time_shift;
+	__u32	time_mult;
 	__u64	time_offset;
 
 		/*
 		 * Hole for extension of the self monitor capabilities
 		 */
 
-	__u64	__reserved[121];	/* align to 1k */
+	__u64	__reserved[120];	/* align to 1k */
 
 	/*
 	 * Control data for the mmap() data buffer.
@@ -550,6 +606,7 @@
 #include <linux/irq_work.h>
 #include <linux/static_key.h>
 #include <linux/atomic.h>
+#include <linux/sysfs.h>
 #include <asm/local.h>
 
 #define PERF_MAX_STACK_DEPTH		255
@@ -1291,5 +1348,18 @@
 	register_cpu_notifier(&fn##_nb);				\
 } while (0)
 
+
+#define PMU_FORMAT_ATTR(_name, _format)					\
+static ssize_t								\
+_name##_show(struct device *dev,					\
+			       struct device_attribute *attr,		\
+			       char *page)				\
+{									\
+	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
+	return sprintf(page, _format "\n");				\
+}									\
+									\
+static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
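Two hedged sketches around the additions above; neither is part of this patch. First, a userspace-side helper performing the sign extension described in the pmc_width comment:

	static inline __s64 pmc_sign_extend(__u64 pmc, unsigned int width)
	{
		/* shift left, then arithmetic-shift right by (64 - width) */
		pmc <<= 64 - width;
		return (__s64)pmc >> (64 - width);
	}

Second, how a PMU driver might use PMU_FORMAT_ATTR() to publish a format attribute under .../<dev>/format; the 'event' name and bit range are example values:

	PMU_FORMAT_ATTR(event, "config:0-7");

	static struct attribute *my_pmu_format_attrs[] = {
		&format_attr_event.attr,
		NULL,
	};

	static struct attribute_group my_pmu_format_group = {
		.name	= "format",
		.attrs	= my_pmu_format_attrs,
	};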
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index d056263..b0f2c56 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -4,8 +4,8 @@
  * GPL v2 Only
  */
 
-#ifndef __ATMEL_NAND_H__
-#define __ATMEL_NAND_H__
+#ifndef __ATMEL_H__
+#define __ATMEL_H__
 
 #include <linux/mtd/nand.h>
 
@@ -24,4 +24,4 @@
 	unsigned int	num_parts;
 };
 
-#endif /* __ATMEL_NAND_H__ */
+#endif /* __ATMEL_H__ */
diff --git a/include/linux/platform_data/spear_thermal.h b/include/linux/platform_data/spear_thermal.h
new file mode 100644
index 0000000..724f2e1
--- /dev/null
+++ b/include/linux/platform_data/spear_thermal.h
@@ -0,0 +1,26 @@
+/*
+ * SPEAr thermal driver platform data.
+ *
+ * Copyright (C) 2011-2012 ST Microelectronics
+ * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SPEAR_THERMAL_H
+#define SPEAR_THERMAL_H
+
+/* SPEAr Thermal Sensor Platform Data */
+struct spear_thermal_pdata {
+	/* flags used to enable thermal sensor */
+	unsigned int thermal_flags;
+};
+
+#endif /* SPEAR_THERMAL_H */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 2e9191a..233149c 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -8,6 +8,7 @@
 #include <linux/notifier.h>
 #include <linux/miscdevice.h>
 #include <linux/device.h>
+#include <linux/workqueue.h>
 
 enum {
 	PM_QOS_RESERVED = 0,
@@ -29,6 +30,7 @@
 struct pm_qos_request {
 	struct plist_node node;
 	int pm_qos_class;
+	struct delayed_work work; /* for pm_qos_update_request_timeout */
 };
 
 struct dev_pm_qos_request {
@@ -73,6 +75,8 @@
 			s32 value);
 void pm_qos_update_request(struct pm_qos_request *req,
 			   s32 new_value);
+void pm_qos_update_request_timeout(struct pm_qos_request *req,
+				   s32 new_value, unsigned long timeout_us);
 void pm_qos_remove_request(struct pm_qos_request *req);
 
 int pm_qos_request(int pm_qos_class);
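A hedged usage sketch for the new timed update (not from this patch): hold a 20 us CPU/DMA latency constraint for roughly 100 ms after a wakeup event, letting it drop back automatically when the timeout expires; the request variable is hypothetical:

	static struct pm_qos_request my_qos_req;

	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	/* ... later, on the wakeup path ... */
	pm_qos_update_request_timeout(&my_qos_req, 20, 100 * USEC_PER_MSEC);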
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index fe99211..e01b167 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -27,6 +27,8 @@
 #define MAX17042_BATTERY_FULL	(100)
 #define MAX17042_DEFAULT_SNS_RESISTOR	(10000)
 
+#define MAX17042_CHARACTERIZATION_DATA_SIZE 48
+
 enum max17042_register {
 	MAX17042_STATUS		= 0x00,
 	MAX17042_VALRT_Th	= 0x01,
@@ -40,11 +42,11 @@
 	MAX17042_VCELL		= 0x09,
 	MAX17042_Current	= 0x0A,
 	MAX17042_AvgCurrent	= 0x0B,
-	MAX17042_Qresidual	= 0x0C,
+
 	MAX17042_SOC		= 0x0D,
 	MAX17042_AvSOC		= 0x0E,
 	MAX17042_RemCap		= 0x0F,
-	MAX17402_FullCAP	= 0x10,
+	MAX17042_FullCAP	= 0x10,
 	MAX17042_TTE		= 0x11,
 	MAX17042_V_empty	= 0x12,
 
@@ -62,14 +64,14 @@
 	MAX17042_AvCap		= 0x1F,
 	MAX17042_ManName	= 0x20,
 	MAX17042_DevName	= 0x21,
-	MAX17042_DevChem	= 0x22,
 
+	MAX17042_FullCAPNom	= 0x23,
 	MAX17042_TempNom	= 0x24,
-	MAX17042_TempCold	= 0x25,
+	MAX17042_TempLim	= 0x25,
 	MAX17042_TempHot	= 0x26,
 	MAX17042_AIN		= 0x27,
 	MAX17042_LearnCFG	= 0x28,
-	MAX17042_SHFTCFG	= 0x29,
+	MAX17042_FilterCFG	= 0x29,
 	MAX17042_RelaxCFG	= 0x2A,
 	MAX17042_MiscCFG	= 0x2B,
 	MAX17042_TGAIN		= 0x2C,
@@ -77,22 +79,41 @@
 	MAX17042_CGAIN		= 0x2E,
 	MAX17042_COFF		= 0x2F,
 
-	MAX17042_Q_empty	= 0x33,
+	MAX17042_MaskSOC	= 0x32,
+	MAX17042_SOC_empty	= 0x33,
 	MAX17042_T_empty	= 0x34,
 
+	MAX17042_FullCAP0       = 0x35,
+	MAX17042_LAvg_empty	= 0x36,
+	MAX17042_FCTC		= 0x37,
 	MAX17042_RCOMP0		= 0x38,
 	MAX17042_TempCo		= 0x39,
-	MAX17042_Rx		= 0x3A,
-	MAX17042_T_empty0	= 0x3B,
+	MAX17042_EmptyTempCo	= 0x3A,
+	MAX17042_K_empty0	= 0x3B,
 	MAX17042_TaskPeriod	= 0x3C,
 	MAX17042_FSTAT		= 0x3D,
 
 	MAX17042_SHDNTIMER	= 0x3F,
 
-	MAX17042_VFRemCap	= 0x4A,
+	MAX17042_dQacc		= 0x45,
+	MAX17042_dPacc		= 0x46,
+
+	MAX17042_VFSOC0		= 0x48,
 
 	MAX17042_QH		= 0x4D,
 	MAX17042_QL		= 0x4E,
+
+	MAX17042_VFSOC0Enable	= 0x60,
+	MAX17042_MLOCKReg1	= 0x62,
+	MAX17042_MLOCKReg2	= 0x63,
+
+	MAX17042_MODELChrTbl	= 0x80,
+
+	MAX17042_OCV		= 0xEE,
+
+	MAX17042_OCVInternal	= 0xFB,
+
+	MAX17042_VFSOC		= 0xFF,
 };
 
 /*
@@ -105,10 +126,64 @@
 	u16 data;
 };
 
+struct max17042_config_data {
+	/* External current sense resistor value in milli-ohms */
+	u32	cur_sense_val;
+
+	/* A/D measurement */
+	u16	tgain;		/* 0x2C */
+	u16	toff;		/* 0x2D */
+	u16	cgain;		/* 0x2E */
+	u16	coff;		/* 0x2F */
+
+	/* Alert / Status */
+	u16	valrt_thresh;	/* 0x01 */
+	u16	talrt_thresh;	/* 0x02 */
+	u16	soc_alrt_thresh;	/* 0x03 */
+	u16	config;		/* 0x01D */
+	u16	shdntimer;	/* 0x03F */
+
+	/* App data */
+	u16	design_cap;	/* 0x18 */
+	u16	ichgt_term;	/* 0x1E */
+
+	/* MG3 config */
+	u16	at_rate;	/* 0x04 */
+	u16	learn_cfg;	/* 0x28 */
+	u16	filter_cfg;	/* 0x29 */
+	u16	relax_cfg;	/* 0x2A */
+	u16	misc_cfg;	/* 0x2B */
+	u16	masksoc;	/* 0x32 */
+
+	/* MG3 save and restore */
+	u16	fullcap;	/* 0x10 */
+	u16	fullcapnom;	/* 0x23 */
+	u16	socempty;	/* 0x33 */
+	u16	lavg_empty;	/* 0x36 */
+	u16	dqacc;		/* 0x45 */
+	u16	dpacc;		/* 0x46 */
+
+	/* Cell technology from power_supply.h */
+	u16	cell_technology;
+
+	/* Cell Data */
+	u16	vempty;		/* 0x12 */
+	u16	temp_nom;	/* 0x24 */
+	u16	temp_lim;	/* 0x25 */
+	u16	fctc;		/* 0x37 */
+	u16	rcomp0;		/* 0x38 */
+	u16	tcompc0;	/* 0x39 */
+	u16	empty_tempco;	/* 0x3A */
+	u16	kempty0;	/* 0x3B */
+	u16	cell_char_tbl[MAX17042_CHARACTERIZATION_DATA_SIZE];
+} __packed;
+
 struct max17042_platform_data {
 	struct max17042_reg_data *init_data;
+	struct max17042_config_data *config_data;
 	int num_init_data; /* Number of enties in init_data array */
 	bool enable_current_sense;
+	bool enable_por_init; /* Use POR init from Maxim appnote */
 
 	/*
 	 * R_sns in micro-ohms.
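A hedged platform-data sketch for the new config_data and enable_por_init fields (the register values below are placeholders, not recommended settings):

	static struct max17042_config_data max17042_cfg = {
		.cur_sense_val	= 10,		/* milli-ohms, example */
		.design_cap	= 0x1388,	/* example raw register value */
		.vempty		= 0xacda,	/* example raw register value */
	};

	static struct max17042_platform_data max17042_pdata = {
		.config_data		= &max17042_cfg,
		.enable_current_sense	= true,
		.enable_por_init	= true,	/* run Maxim appnote POR init */
	};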
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
new file mode 100644
index 0000000..b3cb20d
--- /dev/null
+++ b/include/linux/power/smb347-charger.h
@@ -0,0 +1,117 @@
+/*
+ * Summit Microelectronics SMB347 Battery Charger Driver
+ *
+ * Copyright (C) 2011, Intel Corporation
+ *
+ * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SMB347_CHARGER_H
+#define SMB347_CHARGER_H
+
+#include <linux/types.h>
+#include <linux/power_supply.h>
+
+enum {
+	/* use the default compensation method */
+	SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1,
+
+	SMB347_SOFT_TEMP_COMPENSATE_NONE,
+	SMB347_SOFT_TEMP_COMPENSATE_CURRENT,
+	SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE,
+};
+
+/* Use default factory programmed value for hard/soft temperature limit */
+#define SMB347_TEMP_USE_DEFAULT		-273
+
+/*
+ * Charging enable can be controlled by software (via i2c) by
+ * smb347-charger driver or by EN pin (active low/high).
+ */
+enum smb347_chg_enable {
+	SMB347_CHG_ENABLE_SW,
+	SMB347_CHG_ENABLE_PIN_ACTIVE_LOW,
+	SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
+};
+
+/**
+ * struct smb347_charger_platform_data - platform data for SMB347 charger
+ * @battery_info: Information about the battery
+ * @max_charge_current: maximum current (in uA) the battery can be charged
+ * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
+ * @pre_charge_current: current (in uA) to use in pre-charging phase
+ * @termination_current: current (in uA) used to determine when the
+ *			 charging cycle terminates
+ * @pre_to_fast_voltage: voltage (in uV) threshold used for transitioning from
+ *			 pre-charge to fast charge mode
+ * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
+ * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
+ *			  input
+ * @chip_temp_threshold: die temperature where device starts limiting charge
+ *			 current [%100 - %130] (in degree C)
+ * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
+ *			  granularity is 5 deg C.
+ * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree  C),
+ *			 granularity is 5 deg C.
+ * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
+ *			  granularity is 5 deg C.
+ * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
+ *			 granularity is 5 deg C.
+ * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
+ * @soft_temp_limit_compensation: compensation method when soft temperature
+ *				  limit is hit
+ * @charge_current_compensation: current (in uA) for charging compensation
+ *				 current when temperature hits soft limits
+ * @use_mains: AC/DC input can be used
+ * @use_usb: USB input can be used
+ * @use_usb_otg: USB OTG output can be used (not implemented yet)
+ * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
+ * @enable_control: how charging enable/disable is controlled
+ *		    (driver/pin controls)
+ *
+ * @use_mains, @use_usb, and @use_usb_otg are means to enable/disable
+ * hardware support for these. This is useful when, for example, OTG
+ * charging should be controlled by the OTG transceiver driver rather than
+ * by the SMB347 hardware.
+ *
+ * Hard and soft temperature limit values are given as described in the
+ * device data sheet and assuming NTC beta value is %3750. Even if this is
+ * not the case, these values should be used. They can be mapped to the
+ * corresponding NTC beta values with the help of table %2 in the data
+ * sheet. So for example if NTC beta is %3375 and we want to program hard
+ * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
+ *
+ * If zero value is given in any of the current and voltage values, the
+ * factory programmed default will be used. For soft/hard temperature
+ * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
+ */
+struct smb347_charger_platform_data {
+	struct power_supply_info battery_info;
+	unsigned int	max_charge_current;
+	unsigned int	max_charge_voltage;
+	unsigned int	pre_charge_current;
+	unsigned int	termination_current;
+	unsigned int	pre_to_fast_voltage;
+	unsigned int	mains_current_limit;
+	unsigned int	usb_hc_current_limit;
+	unsigned int	chip_temp_threshold;
+	int		soft_cold_temp_limit;
+	int		soft_hot_temp_limit;
+	int		hard_cold_temp_limit;
+	int		hard_hot_temp_limit;
+	bool		suspend_on_hard_temp_limit;
+	unsigned int	soft_temp_limit_compensation;
+	unsigned int	charge_current_compensation;
+	bool		use_mains;
+	bool		use_usb;
+	bool		use_usb_otg;
+	int		irq_gpio;
+	enum smb347_chg_enable enable_control;
+};
+
+#endif /* SMB347_CHARGER_H */
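A hedged board-data sketch for the new charger header; the electrical limits are illustrative only, and any current/voltage left at zero falls back to the factory-programmed default as documented above:

	static struct smb347_charger_platform_data smb347_pdata = {
		.use_mains		= true,
		.max_charge_current	= 1800000,	/* uA */
		.max_charge_voltage	= 4200000,	/* uV */
		.soft_cold_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.soft_hot_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.hard_cold_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.hard_hot_temp_limit	= SMB347_TEMP_USE_DEFAULT,
		.irq_gpio		= -1,		/* no IRQ line wired */
		.enable_control		= SMB347_CHG_ENABLE_SW,
	};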
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 7abb160..b021084 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -71,7 +71,7 @@
  * @uV_offset: Offset applied to voltages from consumer to compensate for
  *             voltage drops.
  *
- * @min_uA: Smallest consumers consumers may set.
+ * @min_uA: Smallest current consumers may set.
  * @max_uA: Largest current consumers may set.
  *
  * @valid_modes_mask: Mask of modes which may be configured by consumers.
@@ -134,10 +134,8 @@
 /**
  * struct regulator_consumer_supply - supply -> device mapping
  *
- * This maps a supply name to a device.  Only one of dev or dev_name
- * can be specified.  Use of dev_name allows support for buses which
- * make struct device available late such as I2C and is the preferred
- * form.
+ * This maps a supply name to a device. Use of dev_name allows support for
+ * buses which make struct device available late, such as I2C.
  *
  * @dev_name: Result of dev_name() for the consumer.
  * @supply: Name for the supply.
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 67be037..7be2e88 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -151,6 +151,9 @@
 
 void ring_buffer_record_disable(struct ring_buffer *buffer);
 void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3337027..70a3f8d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -481,6 +481,7 @@
 	union {
 		__u32		mark;
 		__u32		dropcount;
+		__u32		avail_size;
 	};
 
 	sk_buff_data_t		transport_header;
@@ -1366,6 +1367,18 @@
 }
 
 /**
+ *	skb_availroom - bytes at buffer end
+ *	@skb: buffer to check
+ *
+ *	Return the number of bytes of free space at the tail of an sk_buff
+ *	allocated by sk_stream_alloc()
+ */
+static inline int skb_availroom(const struct sk_buff *skb)
+{
+	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+}
+
+/**
  *	skb_reserve - adjust headroom
  *	@skb: buffer to alter
  *	@len: bytes to move
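A hedged sender-side sketch of the new helper (not from this patch); 'size' and 'copy' are assumed local variables in a caller that appends data to an skb allocated by sk_stream_alloc():

	copy = min_t(int, size, skb_availroom(skb));
	if (copy <= 0)
		return -ENOMEM;	/* a real caller would allocate a fresh skb */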
diff --git a/include/linux/socket.h b/include/linux/socket.h
index da2d3e2..b84bbd4 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -265,7 +265,7 @@
 #define MSG_NOSIGNAL	0x4000	/* Do not generate SIGPIPE */
 #define MSG_MORE	0x8000	/* Sender will send more */
 #define MSG_WAITFORONE	0x10000	/* recvmmsg(): block until 1+ packets avail */
-
+#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
 #define MSG_EOF         MSG_FIN
 
 #define MSG_CMSG_CLOEXEC 0x40000000	/* Set close_on_exit for file
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8dc0ea7..b1fd5c7 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -305,6 +305,13 @@
 	return vm_swappiness;
 }
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
+#else
+static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
+{
+}
+#endif
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
@@ -375,13 +382,6 @@
 {
 }
 #endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
-extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
-#else
-static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
-{
-}
-#endif
 
 #else /* CONFIG_SWAP */
 
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index 1dba6ee..c75128b 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -143,7 +143,6 @@
 
 extern void tboot_probe(void);
 extern void tboot_shutdown(u32 shutdown_type);
-extern void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control);
 extern struct acpi_table_header *tboot_get_dmar_table(
 				      struct acpi_table_header *dmar_tbl);
 extern int tboot_force_iommu(void);
diff --git a/include/linux/types.h b/include/linux/types.h
index e5fa503..7f480db 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -210,6 +210,12 @@
 
 typedef phys_addr_t resource_size_t;
 
+/*
+ * This type is the placeholder for a hardware interrupt number. It has to be
+ * big enough to enclose whatever representation is used by a given platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
 typedef struct {
 	int counter;
 } atomic_t;
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 9c3120d..b572f80 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -47,6 +47,8 @@
  */
 #define VGA_DEFAULT_DEVICE     (NULL)
 
+struct pci_dev;
+
 /* For use by clients */
 
 /**
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d0018d2..8efd28a 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -96,7 +96,6 @@
 	void (*config_changed)(struct virtio_device *dev);
 #ifdef CONFIG_PM
 	int (*freeze)(struct virtio_device *dev);
-	int (*thaw)(struct virtio_device *dev);
 	int (*restore)(struct virtio_device *dev);
 #endif
 };
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 344b0f9..d47e523 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -92,6 +92,7 @@
 	HCI_SERVICE_CACHE,
 	HCI_LINK_KEYS,
 	HCI_DEBUG_KEYS,
+	HCI_UNREGISTER,
 
 	HCI_LE_SCAN,
 	HCI_SSP_ENABLED,
@@ -1327,8 +1328,8 @@
 #define HCI_DEV_NONE	0xffff
 
 #define HCI_CHANNEL_RAW		0
-#define HCI_CHANNEL_CONTROL	1
 #define HCI_CHANNEL_MONITOR	2
+#define HCI_CHANNEL_CONTROL	3
 
 struct hci_filter {
 	unsigned long type_mask;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index daefaac..6822d25 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -427,7 +427,7 @@
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
-	return (test_bit(HCI_SSP_ENABLED, &hdev->flags) &&
+	return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
 				test_bit(HCI_CONN_SSP_ENABLED, &conn->flags));
 }
 
@@ -907,11 +907,13 @@
 
 static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
 {
-	u8 field_len;
-	size_t parsed;
+	size_t parsed = 0;
 
-	for (parsed = 0; parsed < data_len - 1; parsed += field_len) {
-		field_len = data[0];
+	if (data_len < 2)
+		return false;
+
+	while (parsed < data_len - 1) {
+		u8 field_len = data[0];
 
 		if (field_len == 0)
 			break;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index ffc1377..ebfd91f 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -117,7 +117,7 @@
 #define MGMT_OP_SET_DISCOVERABLE	0x0006
 struct mgmt_cp_set_discoverable {
 	__u8	val;
-	__u16	timeout;
+	__le16	timeout;
 } __packed;
 #define MGMT_SET_DISCOVERABLE_SIZE	3
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 248fb05..83d800c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -620,8 +620,10 @@
  * @llid: mesh local link id
  * @plid: mesh peer link id
  * @plink_state: mesh peer link state
- * @signal: signal strength of last received packet in dBm
- * @signal_avg: signal strength average in dBm
+ * @signal: the signal strength, type depends on the wiphy's signal_type
+ *	NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: avg signal strength, type depends on the wiphy's signal_type
+ *	NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
  * @txrate: current unicast bitrate from this station
  * @rxrate: current unicast bitrate to this station
  * @rx_packets: packets received from this station
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 87d203f..9210bdc 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1327,7 +1327,7 @@
 ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
 		      const struct ieee80211_tx_info *c)
 {
-	if (WARN_ON(c->control.rates[0].idx < 0))
+	if (WARN_ON_ONCE(c->control.rates[0].idx < 0))
 		return NULL;
 	return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx];
 }
diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h
index 7e1544e..9d9756c 100644
--- a/include/net/netfilter/xt_log.h
+++ b/include/net/netfilter/xt_log.h
@@ -47,7 +47,7 @@
 	if (likely(m != &emergency))
 		kfree(m);
 	else {
-		xchg(&emergency_ptr, m);
+		emergency_ptr = m;
 		local_bh_enable();
 	}
 }
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 9c23ee8..917741b 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -261,7 +261,8 @@
 		} host_event;
 		struct msg_ping_comp {
 			uint32_t        host_no;
-			uint32_t        status;
+			uint32_t        status; /* enum
+						 * iscsi_ping_status_code */
 			uint32_t	pid;	/* unique ping id associated
 						   with each ping request */
 			uint32_t        data_size;
@@ -483,6 +484,20 @@
 	ISCSI_PORT_STATE_UP		= 0x2,
 };
 
+/* iSCSI PING status/error code */
+enum iscsi_ping_status_code {
+	ISCSI_PING_SUCCESS			= 0,
+	ISCSI_PING_FW_DISABLED			= 0x1,
+	ISCSI_PING_IPADDR_INVALID		= 0x2,
+	ISCSI_PING_LINKLOCAL_IPV6_ADDR_INVALID	= 0x3,
+	ISCSI_PING_TIMEOUT			= 0x4,
+	ISCSI_PING_INVALID_DEST_ADDR		= 0x5,
+	ISCSI_PING_OVERSIZE_PACKET		= 0x6,
+	ISCSI_PING_ICMP_ERROR			= 0x7,
+	ISCSI_PING_MAX_REQ_EXCEEDED		= 0x8,
+	ISCSI_PING_NO_ARP_RECEIVED		= 0x9,
+};
+
 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
 
@@ -578,6 +593,6 @@
 	char username[ISCSI_CHAP_AUTH_NAME_MAX_LEN];
 	uint8_t password[ISCSI_CHAP_AUTH_SECRET_MAX_LEN];
 	uint8_t password_length;
-} __packed;
+};
 
 #endif
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 5a35a2a..cfdb55f 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -165,7 +165,8 @@
  * @switch_name: WWN of switch from advertisement
  * @fabric_name: WWN of fabric from advertisement
  * @fc_map:	 FC_MAP value from advertisement
- * @fcf_mac:	 Ethernet address of the FCF
+ * @fcf_mac:	 Ethernet address of the FCF for FIP traffic
+ * @fcoe_mac:	 Ethernet address of the FCF for FCoE traffic
  * @vfid:	 virtual fabric ID
  * @pri:	 selection priority, smaller values are better
  * @flogi_sent:	 current FLOGI sent to this FCF
@@ -188,6 +189,7 @@
 	u32 fc_map;
 	u16 vfid;
 	u8 fcf_mac[ETH_ALEN];
+	u8 fcoe_mac[ETH_ALEN];
 
 	u8 pri;
 	u8 flogi_sent;
diff --git a/include/sound/core.h b/include/sound/core.h
index b6e0f57..bc05668 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -325,6 +325,13 @@
 
 /* --- */
 
+/* sound printk debug levels */
+enum {
+	SND_PR_ALWAYS,
+	SND_PR_DEBUG,
+	SND_PR_VERBOSE,
+};
+
 #if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
 __printf(4, 5)
 void __snd_printk(unsigned int level, const char *file, int line,
@@ -354,6 +361,8 @@
  */
 #define snd_printd(fmt, args...) \
 	__snd_printk(1, __FILE__, __LINE__, fmt, ##args)
+#define _snd_printd(level, fmt, args...) \
+	__snd_printk(level, __FILE__, __LINE__, fmt, ##args)
 
 /**
  * snd_BUG - give a BUG warning message and stack trace
@@ -383,6 +392,7 @@
 #else /* !CONFIG_SND_DEBUG */
 
 #define snd_printd(fmt, args...)	do { } while (0)
+#define _snd_printd(level, fmt, args...) do { } while (0)
 #define snd_BUG()			do { } while (0)
 static inline int __snd_bug_on(int cond)
 {
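A hedged usage sketch of the levelled helper added above; the message and the 'codec_index' variable are invented for illustration:

	_snd_printd(SND_PR_VERBOSE, "codec %d: state restored from cache\n",
		    codec_index);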
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 84f3001..91b91e8 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -6,6 +6,7 @@
 
 #include <linux/writeback.h>
 #include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
 
 struct btrfs_root;
 struct btrfs_fs_info;
@@ -862,6 +863,49 @@
 		  __entry->size, __entry->max_size, __entry->bitmap)
 );
 
+struct extent_state;
+TRACE_EVENT(alloc_extent_state,
+
+	TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
+
+	TP_ARGS(state, mask, IP),
+
+	TP_STRUCT__entry(
+		__field(struct extent_state *, state)
+		__field(gfp_t, mask)
+		__field(unsigned long, ip)
+	),
+
+	TP_fast_assign(
+		__entry->state	= state,
+		__entry->mask	= mask,
+		__entry->ip	= IP
+	),
+
+	TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+		  show_gfp_flags(__entry->mask), (void *)__entry->ip)
+);
+
+TRACE_EVENT(free_extent_state,
+
+	TP_PROTO(struct extent_state *state, unsigned long IP),
+
+	TP_ARGS(state, IP),
+
+	TP_STRUCT__entry(
+		__field(struct extent_state *, state)
+		__field(unsigned long, ip)
+	),
+
+	TP_fast_assign(
+		__entry->state	= state,
+		__entry->ip = IP
+	),
+
+	TP_printk(" state=%p; caller = %pF", __entry->state,
+		  (void *)__entry->ip)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index fbc7b1a..ea7a203 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -295,7 +295,7 @@
 	TP_fast_assign(
 		__assign_str(filename, bprm->filename);
 		__entry->pid		= p->pid;
-		__entry->old_pid	= p->pid;
+		__entry->old_pid	= old_pid;
 	),
 
 	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 2ea2fdc..4f4d449 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -7,11 +7,13 @@
 
 extern void
 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			    dma_addr_t *dma_handle, gfp_t flags);
+			    dma_addr_t *dma_handle, gfp_t flags,
+			    struct dma_attrs *attrs);
 
 extern void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
-			  void *vaddr, dma_addr_t dma_handle);
+			  void *vaddr, dma_addr_t dma_handle,
+			  struct dma_attrs *attrs);
 
 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				       unsigned long offset, size_t size,
diff --git a/init/Kconfig b/init/Kconfig
index 72f33fa..6cfd71d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1414,8 +1414,8 @@
 config INIT_ALL_POSSIBLE
 	bool
 	help
-	  Back when each arch used to define their own cpu_online_map and
-	  cpu_possible_map, some of them chose to initialize cpu_possible_map
+	  Back when each arch used to define their own cpu_online_mask and
+	  cpu_possible_mask, some of them chose to initialize cpu_possible_mask
 	  with all 1s, and others with all 0s.  When they were centralised,
 	  it was better to provide this option than to break all the archs
 	  and have several arch maintainers pursuing me down dark alleys.
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 3098a38..9047330 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -2,7 +2,6 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/minix_fs.h>
-#include <linux/ext2_fs.h>
 #include <linux/romfs_fs.h>
 #include <linux/initrd.h>
 #include <linux/sched.h>
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 01f1306..6212586 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -54,20 +54,19 @@
 {
 	const int size = 512;
 	struct minix_super_block *minixsb;
-	struct ext2_super_block *ext2sb;
 	struct romfs_super_block *romfsb;
 	struct cramfs_super *cramfsb;
 	struct squashfs_super_block *squashfsb;
 	int nblocks = -1;
 	unsigned char *buf;
 	const char *compress_name;
+	unsigned long n;
 
 	buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
 	minixsb = (struct minix_super_block *) buf;
-	ext2sb = (struct ext2_super_block *) buf;
 	romfsb = (struct romfs_super_block *) buf;
 	cramfsb = (struct cramfs_super *) buf;
 	squashfsb = (struct squashfs_super_block *) buf;
@@ -150,12 +149,12 @@
 	}
 
 	/* Try ext2 */
-	if (ext2sb->s_magic == cpu_to_le16(EXT2_SUPER_MAGIC)) {
+	n = ext2_image_size(buf);
+	if (n) {
 		printk(KERN_NOTICE
 		       "RAMDISK: ext2 filesystem found at block %d\n",
 		       start_block);
-		nblocks = le32_to_cpu(ext2sb->s_blocks_count) <<
-			le32_to_cpu(ext2sb->s_log_block_size);
+		nblocks = n;
 		goto done;
 	}
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b96ad75..14f7070 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -270,11 +270,11 @@
  * are online.  If none are online, walk up the cpuset hierarchy
  * until we find one that does have some online cpus.  If we get
  * all the way to the top and still haven't found any online cpus,
- * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
- * task, return cpu_online_map.
+ * return cpu_online_mask.  Or if passed a NULL cs from an exit'ing
+ * task, return cpu_online_mask.
  *
  * One way or another, we guarantee to return some non-empty subset
- * of cpu_online_map.
+ * of cpu_online_mask.
  *
  * Call with callback_mutex held.
  */
@@ -867,7 +867,7 @@
 	int retval;
 	int is_load_balanced;
 
-	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
+	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
 	if (cs == &top_cpuset)
 		return -EACCES;
 
@@ -2149,7 +2149,7 @@
  *
  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk.  Guaranteed to return some non-empty
- * subset of cpu_online_map, even if this means going outside the
+ * subset of cpu_online_mask, even if this means going outside the
  * tasks cpuset.
  **/
 
diff --git a/kernel/cred.c b/kernel/cred.c
index 97b36ee..e70683d 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -386,6 +386,8 @@
 	struct cred *new;
 	int ret;
 
+	p->replacement_session_keyring = NULL;
+
 	if (
 #ifdef CONFIG_KEYS
 		!p->cred->thread_keyring &&
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1dc53ba..0557f24 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -160,37 +160,39 @@
  * Weak aliases for breakpoint management,
  * can be overriden by architectures when needed:
  */
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
 	int err;
 
-	err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
 	if (err)
 		return err;
-
-	return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
-				  BREAK_INSTR_SIZE);
+	err = probe_kernel_write((char *)bpt->bpt_addr,
+				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+	return err;
 }
 
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-	return probe_kernel_write((char *)addr,
-				  (char *)bundle, BREAK_INSTR_SIZE);
+	return probe_kernel_write((char *)bpt->bpt_addr,
+				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
 int __weak kgdb_validate_break_address(unsigned long addr)
 {
-	char tmp_variable[BREAK_INSTR_SIZE];
+	struct kgdb_bkpt tmp;
 	int err;
-	/* Validate setting the breakpoint and then removing it.  In the
+	/* Validate setting the breakpoint and then removing it.  If the
 	 * remove fails, the kernel needs to emit a bad message because we
 	 * are deep trouble not being able to put things back the way we
 	 * found them.
 	 */
-	err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+	tmp.bpt_addr = addr;
+	err = kgdb_arch_set_breakpoint(&tmp);
 	if (err)
 		return err;
-	err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+	err = kgdb_arch_remove_breakpoint(&tmp);
 	if (err)
 		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
 		   "memory destroyed at: %lx", addr);
@@ -234,7 +236,6 @@
  */
 int dbg_activate_sw_breakpoints(void)
 {
-	unsigned long addr;
 	int error;
 	int ret = 0;
 	int i;
@@ -243,16 +244,15 @@
 		if (kgdb_break[i].state != BP_SET)
 			continue;
 
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_set_breakpoint(addr,
-				kgdb_break[i].saved_instr);
+		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
 		if (error) {
 			ret = error;
-			printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+			printk(KERN_INFO "KGDB: BP install failed: %lx",
+			       kgdb_break[i].bpt_addr);
 			continue;
 		}
 
-		kgdb_flush_swbreak_addr(addr);
+		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
 		kgdb_break[i].state = BP_ACTIVE;
 	}
 	return ret;
@@ -301,7 +301,6 @@
 
 int dbg_deactivate_sw_breakpoints(void)
 {
-	unsigned long addr;
 	int error;
 	int ret = 0;
 	int i;
@@ -309,15 +308,14 @@
 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
 		if (kgdb_break[i].state != BP_ACTIVE)
 			continue;
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_remove_breakpoint(addr,
-					kgdb_break[i].saved_instr);
+		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
 		if (error) {
-			printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
+			       kgdb_break[i].bpt_addr);
 			ret = error;
 		}
 
-		kgdb_flush_swbreak_addr(addr);
+		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
 		kgdb_break[i].state = BP_SET;
 	}
 	return ret;
@@ -351,7 +349,6 @@
 
 int dbg_remove_all_break(void)
 {
-	unsigned long addr;
 	int error;
 	int i;
 
@@ -359,12 +356,10 @@
 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
 		if (kgdb_break[i].state != BP_ACTIVE)
 			goto setundefined;
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_remove_breakpoint(addr,
-				kgdb_break[i].saved_instr);
+		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
 		if (error)
 			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
-			   addr);
+			       kgdb_break[i].bpt_addr);
 setundefined:
 		kgdb_break[i].state = BP_UNDEFINED;
 	}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 9b5f17d..bb9520f 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -743,7 +743,7 @@
 		kdb_input_flush();
 		c = console_drivers;
 
-		if (!dbg_io_ops->is_console) {
+		if (dbg_io_ops && !dbg_io_ops->is_console) {
 			len = strlen(moreprompt);
 			cp = moreprompt;
 			while (len--) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b50357..a6a9ec4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3348,7 +3348,7 @@
 	*running = ctx_time - event->tstamp_running;
 }
 
-void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
 
@@ -3398,7 +3398,7 @@
 	userpg->time_running = running +
 			atomic64_read(&event->child_total_time_running);
 
-	perf_update_user_clock(userpg, now);
+	arch_perf_update_userpage(userpg, now);
 
 	barrier();
 	++userpg->lock;
@@ -7116,6 +7116,13 @@
 
 	/* do not patch jump label more than once per second */
 	jump_label_rate_limit(&perf_sched_events, HZ);
+
+	/*
+	 * Build time assertion that we keep the data_head at the intended
+	 * location.  IOW, validation we got the __reserved[] size right.
+	 */
+	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
+		     != 1024);
 }
 
 static int __init perf_event_sysfs_init(void)
diff --git a/kernel/futex.c b/kernel/futex.c
index 72efa1e..e2b0fb9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -59,6 +59,7 @@
 #include <linux/magic.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
+#include <linux/ptrace.h>
 
 #include <asm/futex.h>
 
@@ -2443,40 +2444,31 @@
 {
 	struct robust_list_head __user *head;
 	unsigned long ret;
-	const struct cred *cred = current_cred(), *pcred;
+	struct task_struct *p;
 
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
 
-	if (!pid)
-		head = current->robust_list;
-	else {
-		struct task_struct *p;
+	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
 
-		ret = -ESRCH;
-		rcu_read_lock();
+	rcu_read_lock();
+
+	ret = -ESRCH;
+	if (!pid)
+		p = current;
+	else {
 		p = find_task_by_vpid(pid);
 		if (!p)
 			goto err_unlock;
-		ret = -EPERM;
-		pcred = __task_cred(p);
-		/* If victim is in different user_ns, then uids are not
-		   comparable, so we must have CAP_SYS_PTRACE */
-		if (cred->user->user_ns != pcred->user->user_ns) {
-			if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-				goto err_unlock;
-			goto ok;
-		}
-		/* If victim is in same user_ns, then uids are comparable */
-		if (cred->euid != pcred->euid &&
-		    cred->euid != pcred->uid &&
-		    !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-			goto err_unlock;
-ok:
-		head = p->robust_list;
-		rcu_read_unlock();
 	}
 
+	ret = -EPERM;
+	if (!ptrace_may_access(p, PTRACE_MODE_READ))
+		goto err_unlock;
+
+	head = p->robust_list;
+	rcu_read_unlock();
+
 	if (put_user(sizeof(*head), len_ptr))
 		return -EFAULT;
 	return put_user(head, head_ptr);
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 5f9e689..83e368b 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -10,6 +10,7 @@
 #include <linux/compat.h>
 #include <linux/nsproxy.h>
 #include <linux/futex.h>
+#include <linux/ptrace.h>
 
 #include <asm/uaccess.h>
 
@@ -136,40 +137,31 @@
 {
 	struct compat_robust_list_head __user *head;
 	unsigned long ret;
-	const struct cred *cred = current_cred(), *pcred;
+	struct task_struct *p;
 
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
 
-	if (!pid)
-		head = current->compat_robust_list;
-	else {
-		struct task_struct *p;
+	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
 
-		ret = -ESRCH;
-		rcu_read_lock();
+	rcu_read_lock();
+
+	ret = -ESRCH;
+	if (!pid)
+		p = current;
+	else {
 		p = find_task_by_vpid(pid);
 		if (!p)
 			goto err_unlock;
-		ret = -EPERM;
-		pcred = __task_cred(p);
-		/* If victim is in different user_ns, then uids are not
-		   comparable, so we must have CAP_SYS_PTRACE */
-		if (cred->user->user_ns != pcred->user->user_ns) {
-			if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-				goto err_unlock;
-			goto ok;
-		}
-		/* If victim is in same user_ns, then uids are comparable */
-		if (cred->euid != pcred->euid &&
-		    cred->euid != pcred->uid &&
-		    !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-			goto err_unlock;
-ok:
-		head = p->compat_robust_list;
-		rcu_read_unlock();
 	}
 
+	ret = -EPERM;
+	if (!ptrace_may_access(p, PTRACE_MODE_READ))
+		goto err_unlock;
+
+	head = p->compat_robust_list;
+	rcu_read_unlock();
+
 	if (put_user(sizeof(*head), len_ptr))
 		return -EFAULT;
 	return put_user(ptr_to_compat(head), head_ptr);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index cf1a4a6..d1a758b 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -62,7 +62,7 @@
 	help
 	  This option will show the mapping relationship between hardware irq
 	  numbers and Linux irq numbers. The mapping is exposed via debugfs
-	  in the file "virq_mapping".
+	  in the file "irq_domain_mapping".
 
 	  If you don't know what this means you don't need it.
 
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 6ff84e6..bdb1803 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -54,14 +54,18 @@
 static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 {
 	/*
-	 * Wake up the handler thread for this action. In case the
-	 * thread crashed and was killed we just pretend that we
-	 * handled the interrupt. The hardirq handler has disabled the
-	 * device interrupt, so no irq storm is lurking. If the
+	 * In case the thread crashed and was killed we just pretend that
+	 * we handled the interrupt. The hardirq handler has disabled the
+	 * device interrupt, so no irq storm is lurking.
+	 */
+	if (action->thread->flags & PF_EXITING)
+		return;
+
+	/*
+	 * Wake up the handler thread for this action. If the
 	 * RUNTHREAD bit is already set, nothing to do.
 	 */
-	if ((action->thread->flags & PF_EXITING) ||
-	    test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 		return;
 
 	/*
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3601f3f..d34413e 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,7 +23,6 @@
 static DEFINE_MUTEX(irq_domain_mutex);
 
 static DEFINE_MUTEX(revmap_trees_mutex);
-static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_domain *irq_default_domain;
 
 /**
@@ -184,13 +183,16 @@
 }
 
 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
 	struct irq_domain *domain = irq_domain_alloc(of_node,
 					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
-	if (domain)
+	if (domain) {
+		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
 		irq_domain_add(domain);
+	}
 	return domain;
 }
 
@@ -262,22 +264,6 @@
 	irq_default_domain = domain;
 }
 
-/**
- * irq_set_virq_count() - Set the maximum number of linux irqs
- * @count: number of linux irqs, capped with NR_IRQS
- *
- * This is mainly for use by platforms like iSeries who want to program
- * the virtual irq number in the controller to avoid the reverse mapping
- */
-void irq_set_virq_count(unsigned int count)
-{
-	pr_debug("irq: Trying to set virq count to %d\n", count);
-
-	BUG_ON(count < NUM_ISA_INTERRUPTS);
-	if (count < NR_IRQS)
-		irq_virq_count = count;
-}
-
 static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
 			    irq_hw_number_t hwirq)
 {
@@ -320,13 +306,12 @@
 		pr_debug("irq: create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= irq_virq_count) {
+	if (virq >= domain->revmap_data.nomap.max_irq) {
 		pr_err("ERROR: no free irqs available below %i maximum\n",
-			irq_virq_count);
+			domain->revmap_data.nomap.max_irq);
 		irq_free_desc(virq);
 		return 0;
 	}
-
 	pr_debug("irq: create_direct obtained virq %d\n", virq);
 
 	if (irq_setup_virq(domain, virq, virq)) {
@@ -350,7 +335,8 @@
 unsigned int irq_create_mapping(struct irq_domain *domain,
 				irq_hw_number_t hwirq)
 {
-	unsigned int virq, hint;
+	unsigned int hint;
+	int virq;
 
 	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
@@ -377,13 +363,13 @@
 		return irq_domain_legacy_revmap(domain, hwirq);
 
 	/* Allocate a virtual interrupt number */
-	hint = hwirq % irq_virq_count;
+	hint = hwirq % nr_irqs;
 	if (hint == 0)
 		hint++;
 	virq = irq_alloc_desc_from(hint, 0);
-	if (!virq)
+	if (virq <= 0)
 		virq = irq_alloc_desc_from(1, 0);
-	if (!virq) {
+	if (virq <= 0) {
 		pr_debug("irq: -> virq allocation failed\n");
 		return 0;
 	}
@@ -515,7 +501,7 @@
 			      irq_hw_number_t hwirq)
 {
 	unsigned int i;
-	unsigned int hint = hwirq % irq_virq_count;
+	unsigned int hint = hwirq % nr_irqs;
 
 	/* Look for default domain if nececssary */
 	if (domain == NULL)
@@ -536,7 +522,7 @@
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
 			return i;
 		i++;
-		if (i >= irq_virq_count)
+		if (i >= nr_irqs)
 			i = 1;
 	} while(i != hint);
 	return 0;
@@ -642,8 +628,8 @@
 	void *data;
 	int i;
 
-	seq_printf(m, "%-5s  %-7s  %-15s  %-18s  %s\n", "virq", "hwirq",
-		      "chip name", "chip data", "domain name");
+	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
+		      "chip name", 2 * sizeof(void *) + 2, "chip data", "domain name");
 
 	for (i = 1; i < nr_irqs; i++) {
 		desc = irq_to_desc(i);
@@ -666,7 +652,7 @@
 			seq_printf(m, "%-15s  ", p);
 
 			data = irq_desc_get_chip_data(desc);
-			seq_printf(m, "0x%16p  ", data);
+			seq_printf(m, data ? "0x%p  " : "  %p  ", data);
 
 			if (desc->irq_data.domain && desc->irq_data.domain->of_node)
 				p = desc->irq_data.domain->of_node->full_name;
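A hedged caller-side sketch for the reworked nomap API (not from this patch): the new second argument caps how many interrupts the domain may hand out, with 0 meaning "no limit"; 'np' and 'my_irq_domain_ops' are hypothetical:

	domain = irq_domain_add_nomap(np, 64, &my_irq_domain_ops, NULL);
	if (!domain)
		return -ENOMEM;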
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index b0ccd1a..89a3ea8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -282,7 +282,7 @@
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cpumask *set = irq_default_affinity;
-	int ret;
+	int ret, node = desc->irq_data.node;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -301,6 +301,13 @@
 	}
 
 	cpumask_and(mask, cpu_online_mask, set);
+	if (node != NUMA_NO_NODE) {
+		const struct cpumask *nodemask = cpumask_of_node(node);
+
+		/* make sure at least one of the cpus in nodemask is online */
+		if (cpumask_intersects(mask, nodemask))
+			cpumask_and(mask, mask, nodemask);
+	}
 	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
@@ -645,7 +652,7 @@
  * is marked MASKED.
  */
 static void irq_finalize_oneshot(struct irq_desc *desc,
-				 struct irqaction *action, bool force)
+				 struct irqaction *action)
 {
 	if (!(desc->istate & IRQS_ONESHOT))
 		return;
@@ -679,7 +686,7 @@
 	 * we would clear the threads_oneshot bit of this thread which
 	 * was just set.
 	 */
-	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 		goto out_unlock;
 
 	desc->threads_oneshot &= ~action->thread_mask;
@@ -739,7 +746,7 @@
 
 	local_bh_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
-	irq_finalize_oneshot(desc, action, false);
+	irq_finalize_oneshot(desc, action);
 	local_bh_enable();
 	return ret;
 }
@@ -755,7 +762,7 @@
 	irqreturn_t ret;
 
 	ret = action->thread_fn(action->irq, action->dev_id);
-	irq_finalize_oneshot(desc, action, false);
+	irq_finalize_oneshot(desc, action);
 	return ret;
 }
 
@@ -844,7 +851,7 @@
 		wake_threads_waitq(desc);
 
 	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action, true);
+	irq_finalize_oneshot(desc, action);
 }
 
 static void irq_setup_forced_threading(struct irqaction *new)
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 4742090..c3c8975 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -43,12 +43,16 @@
 	 * masking the irqs.
 	 */
 	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids))
-		if (!chip->irq_set_affinity(&desc->irq_data,
-					    desc->pending_mask, false)) {
+		   < nr_cpu_ids)) {
+		int ret = chip->irq_set_affinity(&desc->irq_data,
+						 desc->pending_mask, false);
+		switch (ret) {
+		case IRQ_SET_MASK_OK:
 			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
+		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
 		}
+	}
 
 	cpumask_clear(desc->pending_mask);
 }
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index c3c46c7..0c56d44 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -5,6 +5,7 @@
  * context. The enqueueing is NMI-safe.
  */
 
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/irq_work.h>
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 957a7aa..05698a7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -322,7 +322,7 @@
  * land has been frozen during a system-wide hibernation or suspend operation).
  * Should always be manipulated under umhelper_sem acquired for write.
  */
-static int usermodehelper_disabled = 1;
+static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
 
 /* Number of helpers running */
 static atomic_t running_helpers = ATOMIC_INIT(0);
@@ -334,32 +334,110 @@
 static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 
 /*
+ * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
+ * to become 'false'.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
+
+/*
  * Time to wait for running_helpers to become zero before the setting of
  * usermodehelper_disabled in usermodehelper_disable() fails
  */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
-void read_lock_usermodehelper(void)
+int usermodehelper_read_trylock(void)
 {
-	down_read(&umhelper_sem);
-}
-EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+	DEFINE_WAIT(wait);
+	int ret = 0;
 
-void read_unlock_usermodehelper(void)
+	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_INTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		if (usermodehelper_disabled == UMH_DISABLED)
+			ret = -EAGAIN;
+
+		up_read(&umhelper_sem);
+
+		if (ret)
+			break;
+
+		schedule();
+		try_to_freeze();
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
+
+long usermodehelper_read_lock_wait(long timeout)
+{
+	DEFINE_WAIT(wait);
+
+	if (timeout < 0)
+		return -EINVAL;
+
+	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		up_read(&umhelper_sem);
+
+		timeout = schedule_timeout(timeout);
+		if (!timeout)
+			break;
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return timeout;
+}
+EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
+
+void usermodehelper_read_unlock(void)
 {
 	up_read(&umhelper_sem);
 }
-EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
 
 /**
- * usermodehelper_disable - prevent new helpers from being started
+ * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Change the value of usermodehelper_disabled (under umhelper_sem locked for
+ * writing) and wakeup tasks waiting for it to change.
  */
-int usermodehelper_disable(void)
+void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
+{
+	down_write(&umhelper_sem);
+	usermodehelper_disabled = depth;
+	wake_up(&usermodehelper_disabled_waitq);
+	up_write(&umhelper_sem);
+}
+
+/**
+ * __usermodehelper_disable - Prevent new helpers from being started.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
+ */
+int __usermodehelper_disable(enum umh_disable_depth depth)
 {
 	long retval;
 
+	if (!depth)
+		return -EINVAL;
+
 	down_write(&umhelper_sem);
-	usermodehelper_disabled = 1;
+	usermodehelper_disabled = depth;
 	up_write(&umhelper_sem);
 
 	/*
@@ -374,31 +452,10 @@
 	if (retval)
 		return 0;
 
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
+	__usermodehelper_set_disable_depth(UMH_ENABLED);
 	return -EAGAIN;
 }
 
-/**
- * usermodehelper_enable - allow new helpers to be started again
- */
-void usermodehelper_enable(void)
-{
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
-}
-
-/**
- * usermodehelper_is_disabled - check if new helpers are allowed to be started
- */
-bool usermodehelper_is_disabled(void)
-{
-	return usermodehelper_disabled;
-}
-EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
-
 static void helper_lock(void)
 {
 	atomic_inc(&running_helpers);
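
The read-lock side above replaces the old read_lock_usermodehelper()/read_unlock_usermodehelper() pair. A minimal caller sketch of how the trylock variant is meant to bracket call_usermodehelper(); the helper function and its argv/envp are hypothetical, only the usermodehelper_* and call_usermodehelper() calls come from the kernel API:

	/* Hypothetical caller; bails out instead of blocking while UMH_DISABLED. */
	static int start_my_helper(char **argv, char **envp)
	{
		int ret;

		ret = usermodehelper_read_trylock();	/* -EAGAIN if UMH_DISABLED */
		if (ret)
			return ret;

		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

		usermodehelper_read_unlock();
		return ret;
	}
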
diff --git a/kernel/padata.c b/kernel/padata.c
index 6f10eb2..89fe3d1 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,6 +1,8 @@
 /*
  * padata.c - generic interface to process data streams in parallel
  *
+ * See Documentation/padata.txt for the API documentation.
+ *
  * Copyright (C) 2008, 2009 secunet Security Networks AG
  * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
  *
@@ -354,13 +356,13 @@
 	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
 		return -ENOMEM;
 
-	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
 	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
 		free_cpumask_var(pd->cpumask.pcpu);
 		return -ENOMEM;
 	}
 
-	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
 	return 0;
 }
 
@@ -564,7 +566,7 @@
 static bool padata_validate_cpumask(struct padata_instance *pinst,
 				    const struct cpumask *cpumask)
 {
-	if (!cpumask_intersects(cpumask, cpu_active_mask)) {
+	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
 		pinst->flags |= PADATA_INVALID;
 		return false;
 	}
@@ -678,7 +680,7 @@
 {
 	struct parallel_data *pd;
 
-	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
 		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
 				     pinst->cpumask.cbcpu);
 		if (!pd)
@@ -746,6 +748,9 @@
 			return -ENOMEM;
 
 		padata_replace(pinst, pd);
+
+		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
+		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
 	}
 
 	return 0;
diff --git a/kernel/panic.c b/kernel/panic.c
index 80aed44..8ed89a1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -97,7 +97,7 @@
 	/*
 	 * Avoid nested stack-dumping if a panic occurs during oops processing
 	 */
-	if (!oops_in_progress)
+	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
 		dump_stack();
 #endif
 
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 0a186cf..e09dfbf 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -16,7 +16,6 @@
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/async.h>
-#include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
@@ -611,14 +610,10 @@
 	if (error)
 		goto Exit;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto Exit;
-
 	/* Allocate memory management structures */
 	error = create_basic_memory_bitmaps();
 	if (error)
-		goto Enable_umh;
+		goto Exit;
 
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
@@ -661,8 +656,6 @@
 
  Free_bitmaps:
 	free_basic_memory_bitmaps();
- Enable_umh:
-	usermodehelper_enable();
  Exit:
 	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	pm_restore_console();
@@ -777,16 +770,10 @@
 	if (error)
 		goto close_finish;
 
-	error = usermodehelper_disable();
+	error = create_basic_memory_bitmaps();
 	if (error)
 		goto close_finish;
 
-	error = create_basic_memory_bitmaps();
-	if (error) {
-		usermodehelper_enable();
-		goto close_finish;
-	}
-
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
 	if (error) {
@@ -806,7 +793,6 @@
 	thaw_processes();
  Done:
 	free_basic_memory_bitmaps();
-	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_RESTORE);
 	pm_restore_console();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0d2aeb2..19db29f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
 #include <linux/freezer.h>
 #include <linux/delay.h>
 #include <linux/workqueue.h>
+#include <linux/kmod.h>
 
 /* 
  * Timeout for stopping processes
@@ -122,6 +123,10 @@
 {
 	int error;
 
+	error = __usermodehelper_disable(UMH_FREEZING);
+	if (error)
+		return error;
+
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
@@ -130,6 +135,7 @@
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
+		__usermodehelper_set_disable_depth(UMH_DISABLED);
 		oom_killer_disable();
 	}
 	printk("\n");
@@ -187,6 +193,8 @@
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 
+	usermodehelper_enable();
+
 	schedule();
 	printk("done.\n");
 }
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index d6d6dbd..6a031e6 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -230,6 +230,21 @@
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
+ * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
+ * @work: work struct for the delayed work (timeout)
+ *
+ * This cancels the timeout request by falling back to the default at timeout.
+ */
+static void pm_qos_work_fn(struct work_struct *work)
+{
+	struct pm_qos_request *req = container_of(to_delayed_work(work),
+						  struct pm_qos_request,
+						  work);
+
+	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+}
+
+/**
  * pm_qos_add_request - inserts new qos request into the list
  * @req: pointer to a preallocated handle
  * @pm_qos_class: identifies which list of qos request to use
@@ -253,6 +268,7 @@
 		return;
 	}
 	req->pm_qos_class = pm_qos_class;
+	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
 			     &req->node, PM_QOS_ADD_REQ, value);
 }
@@ -279,6 +295,9 @@
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
@@ -287,6 +306,34 @@
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
 /**
+ * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
+ * @req: handle to the list element holding a pm_qos request to use
+ * @new_value: defines the temporary qos request value
+ * @timeout_us: the effective duration of this qos request in usecs.
+ *
+ * After timeout_us, this qos request is cancelled automatically.
+ */
+void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
+				   unsigned long timeout_us)
+{
+	if (!req)
+		return;
+	if (WARN(!pm_qos_request_active(req),
+		 "%s called for unknown object.", __func__))
+		return;
+
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+
+	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
+}
+
+/**
  * pm_qos_remove_request - modifies an existing qos request
  * @req: handle to request list element
  *
@@ -305,6 +352,9 @@
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     &req->node, PM_QOS_REMOVE_REQ,
 			     PM_QOS_DEFAULT_VALUE);
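
A hedged usage sketch for the new timeout variant: the request name and latency values below are illustrative, only the pm_qos_* calls come from this file. The delayed work added above restores the default once timeout_us expires, so the caller does not need its own timer:

	static struct pm_qos_request my_qos_req;	/* illustrative */

	static void my_device_burst(void)
	{
		/* one-time setup elsewhere:
		 * pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY,
		 *		      PM_QOS_DEFAULT_VALUE);
		 */

		/* cap CPU wakeup latency at 20 usec for the next 100 ms */
		pm_qos_update_request_timeout(&my_qos_req, 20, 100 * USEC_PER_MSEC);
	}
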
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 88e5c967..396d262 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -12,7 +12,6 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/kmod.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
@@ -102,17 +101,12 @@
 	if (error)
 		goto Finish;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto Finish;
-
 	error = suspend_freeze_processes();
 	if (!error)
 		return 0;
 
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
-	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
@@ -259,7 +253,6 @@
 static void suspend_finish(void)
 {
 	suspend_thaw_processes();
-	usermodehelper_enable();
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
 }
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 33c4329..91b0fd0 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -12,7 +12,6 @@
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/reboot.h>
-#include <linux/kmod.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/miscdevice.h>
@@ -222,14 +221,8 @@
 		sys_sync();
 		printk("done.\n");
 
-		error = usermodehelper_disable();
-		if (error)
-			break;
-
 		error = freeze_processes();
-		if (error)
-			usermodehelper_enable();
-		else
+		if (!error)
 			data->frozen = 1;
 		break;
 
@@ -238,7 +231,6 @@
 			break;
 		pm_restore_gfp_mask();
 		thaw_processes();
-		usermodehelper_enable();
 		data->frozen = 0;
 		break;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3ed0ec..4603b9d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1270,7 +1270,7 @@
 	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_mask(dest_cpu, *nodemask) {
+	for_each_cpu(dest_cpu, nodemask) {
 		if (!cpu_online(dest_cpu))
 			continue;
 		if (!cpu_active(dest_cpu))
@@ -1281,7 +1281,7 @@
 
 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
 			if (!cpu_online(dest_cpu))
 				continue;
 			if (!cpu_active(dest_cpu))
@@ -1964,6 +1964,7 @@
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
@@ -3101,8 +3102,6 @@
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	struct pt_regs *regs = get_irq_regs();
-
 	if (oops_in_progress)
 		return;
 
@@ -3113,11 +3112,7 @@
 	print_modules();
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-
-	if (regs)
-		show_regs(regs);
-	else
-		dump_stack();
+	dump_stack();
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 42b1f30..fb3acba 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -681,6 +681,9 @@
 #ifndef finish_arch_switch
 # define finish_arch_switch(prev)	do { } while (0)
 #endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
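
The empty default lets an architecture hook work that must run after the runqueue lock has been released in finish_task_switch(). A sketch of how a port might override it from its own headers; the helper name is made up, not taken from any real architecture:

	#define finish_arch_post_lock_switch finish_arch_post_lock_switch
	static inline void finish_arch_post_lock_switch(void)
	{
		/* e.g. finish a deferred MMU context switch now that the
		 * rq lock has been dropped */
		my_arch_finish_mm_switch();	/* hypothetical helper */
	}
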
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 52b3a06..4ab1187 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -170,7 +170,7 @@
 #endif
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
@@ -703,7 +703,7 @@
 		.data		= &dmesg_restrict,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_dointvec_minmax_sysadmin,
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
@@ -712,7 +712,7 @@
 		.data		= &kptr_restrict,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dmesg_restrict,
+		.proc_handler	= proc_dointvec_minmax_sysadmin,
 		.extra1		= &zero,
 		.extra2		= &two,
 	},
@@ -1943,7 +1943,7 @@
 }
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	if (write && !capable(CAP_SYS_ADMIN))
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cd31345..a1d2849 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,7 @@
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index cdea7b5..c0bd030 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -311,13 +311,6 @@
 }
 EXPORT_SYMBOL_GPL(blk_trace_remove);
 
-static int blk_dropped_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-
-	return 0;
-}
-
 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 				size_t count, loff_t *ppos)
 {
@@ -331,18 +324,11 @@
 
 static const struct file_operations blk_dropped_fops = {
 	.owner =	THIS_MODULE,
-	.open =		blk_dropped_open,
+	.open =		simple_open,
 	.read =		blk_dropped_read,
 	.llseek =	default_llseek,
 };
 
-static int blk_msg_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-
-	return 0;
-}
-
 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 				size_t count, loff_t *ppos)
 {
@@ -371,7 +357,7 @@
 
 static const struct file_operations blk_msg_fops = {
 	.owner =	THIS_MODULE,
-	.open =		blk_msg_open,
+	.open =		simple_open,
 	.write =	blk_msg_write,
 	.llseek =	noop_llseek,
 };
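
Both removed open handlers did nothing but copy inode->i_private into filp->private_data, which is exactly what simple_open() provides; a sketch consistent with the fs/libfs.c helper these fops now point at:

	int simple_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}
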
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 867bd1d..0fa92f6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -249,7 +249,8 @@
 #else
 	__ftrace_trace_function = func;
 #endif
-	ftrace_trace_function = ftrace_test_stop_func;
+	ftrace_trace_function =
+		(func == ftrace_stub) ? func : ftrace_test_stop_func;
 #endif
 }
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f5b7b5c..cf8d11e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -154,35 +154,12 @@
 
 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
+/* Used for individual buffers (after the counter) */
+#define RB_BUFFER_OFF		(1 << 20)
+
 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 
 /**
- * tracing_on - enable all tracing buffers
- *
- * This function enables all tracing buffers that may have been
- * disabled with tracing_off.
- */
-void tracing_on(void)
-{
-	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_on);
-
-/**
- * tracing_off - turn off all tracing buffers
- *
- * This function stops all tracing buffers from recording data.
- * It does not disable any overhead the tracers themselves may
- * be causing. This function simply causes all recording to
- * the ring buffers to fail.
- */
-void tracing_off(void)
-{
-	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_off);
-
-/**
  * tracing_off_permanent - permanently disable ring buffers
  *
  * This function, once called, will disable all ring buffers
@@ -193,15 +170,6 @@
 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
-/**
- * tracing_is_on - show state of ring buffers enabled
- */
-int tracing_is_on(void)
-{
-	return ring_buffer_flags == RB_BUFFERS_ON;
-}
-EXPORT_SYMBOL_GPL(tracing_is_on);
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2619,6 +2587,63 @@
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
+ * ring_buffer_record_off - stop all writes into the buffer
+ * @buffer: The ring buffer to stop writes to.
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * This is different from ring_buffer_record_disable() in that
+ * it works like an on/off switch, whereas the disable() version
+ * must be paired with an enable().
+ */
+void ring_buffer_record_off(struct ring_buffer *buffer)
+{
+	unsigned int rd;
+	unsigned int new_rd;
+
+	do {
+		rd = atomic_read(&buffer->record_disabled);
+		new_rd = rd | RB_BUFFER_OFF;
+	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_off);
+
+/**
+ * ring_buffer_record_on - restart writes into the buffer
+ * @buffer: The ring buffer to start writes to.
+ *
+ * This enables all writes to the buffer that was disabled by
+ * ring_buffer_record_off().
+ *
+ * This is different from ring_buffer_record_enable() in that
+ * it works like an on/off switch, whereas the enable() version
+ * must be paired with a disable().
+ */
+void ring_buffer_record_on(struct ring_buffer *buffer)
+{
+	unsigned int rd;
+	unsigned int new_rd;
+
+	do {
+		rd = atomic_read(&buffer->record_disabled);
+		new_rd = rd & ~RB_BUFFER_OFF;
+	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_on);
+
+/**
+ * ring_buffer_record_is_on - return true if the ring buffer can write
+ * @buffer: The ring buffer to see if write is enabled
+ *
+ * Returns true if the ring buffer is in a state that it accepts writes.
+ */
+int ring_buffer_record_is_on(struct ring_buffer *buffer)
+{
+	return !atomic_read(&buffer->record_disabled);
+}
+
+/**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
  * @cpu: The CPU buffer to stop
@@ -4039,68 +4064,6 @@
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
-#ifdef CONFIG_TRACING
-static ssize_t
-rb_simple_read(struct file *filp, char __user *ubuf,
-	       size_t cnt, loff_t *ppos)
-{
-	unsigned long *p = filp->private_data;
-	char buf[64];
-	int r;
-
-	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
-		r = sprintf(buf, "permanently disabled\n");
-	else
-		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-rb_simple_write(struct file *filp, const char __user *ubuf,
-		size_t cnt, loff_t *ppos)
-{
-	unsigned long *p = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	if (val)
-		set_bit(RB_BUFFERS_ON_BIT, p);
-	else
-		clear_bit(RB_BUFFERS_ON_BIT, p);
-
-	(*ppos)++;
-
-	return cnt;
-}
-
-static const struct file_operations rb_simple_fops = {
-	.open		= tracing_open_generic,
-	.read		= rb_simple_read,
-	.write		= rb_simple_write,
-	.llseek		= default_llseek,
-};
-
-
-static __init int rb_init_debugfs(void)
-{
-	struct dentry *d_tracer;
-
-	d_tracer = tracing_init_dentry();
-
-	trace_create_file("tracing_on", 0644, d_tracer,
-			    &ring_buffer_flags, &rb_simple_fops);
-
-	return 0;
-}
-
-fs_initcall(rb_init_debugfs);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu)
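
A short sketch of how the new switch differs from the counted disable()/enable() pair; 'buffer' is assumed to be a valid struct ring_buffer obtained elsewhere:

	/* Counted: every disable must be matched by an enable. */
	ring_buffer_record_disable(buffer);
	/* ... touch the buffer safely ... */
	ring_buffer_record_enable(buffer);

	/* Switch: a single off sticks until someone turns it back on,
	 * independent of how deep the disable count currently is. */
	ring_buffer_record_off(buffer);
	if (!ring_buffer_record_is_on(buffer))
		pr_info("ring buffer writes are now rejected\n");
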
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 10d5503..ed7b5d1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -36,6 +36,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -352,6 +353,59 @@
 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 
 /**
+ * tracing_on - enable tracing buffers
+ *
+ * This function enables tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_on(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * on the buffer.
+	 */
+	global_trace.buffer_disabled = 0;
+}
+EXPORT_SYMBOL_GPL(tracing_on);
+
+/**
+ * tracing_off - turn off tracing buffers
+ *
+ * This function stops the tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_off(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * on the buffer.
+	 */
+	global_trace.buffer_disabled = 1;
+}
+EXPORT_SYMBOL_GPL(tracing_off);
+
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+	if (global_trace.buffer)
+		return ring_buffer_record_is_on(global_trace.buffer);
+	return !global_trace.buffer_disabled;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
+/**
  * trace_wake_up - wake up tasks waiting for trace input
  *
  * Schedules a delayed work to wake up any task that is blocked on the
@@ -1644,6 +1698,7 @@
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
+	int next_size = 0;
 	int cpu;
 
 	/*
@@ -1675,9 +1730,12 @@
 			next_cpu = cpu;
 			next_ts = ts;
 			next_lost = lost_events;
+			next_size = iter->ent_size;
 		}
 	}
 
+	iter->ent_size = next_size;
+
 	if (ent_cpu)
 		*ent_cpu = next_cpu;
 
@@ -4567,6 +4625,55 @@
 		create_trace_option_core_file(trace_options[i], i);
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+	       size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	char buf[64];
+	int r;
+
+	if (buffer)
+		r = ring_buffer_record_is_on(buffer);
+	else
+		r = 0;
+
+	r = sprintf(buf, "%d\n", r);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	if (buffer) {
+		if (val)
+			ring_buffer_record_on(buffer);
+		else
+			ring_buffer_record_off(buffer);
+	}
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static const struct file_operations rb_simple_fops = {
+	.open		= tracing_open_generic,
+	.read		= rb_simple_read,
+	.write		= rb_simple_write,
+	.llseek		= default_llseek,
+};
+
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -4626,6 +4733,9 @@
 	trace_create_file("trace_clock", 0644, d_tracer, NULL,
 			  &trace_clock_fops);
 
+	trace_create_file("tracing_on", 0644, d_tracer,
+			    global_trace.buffer, &rb_simple_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4798,6 +4908,7 @@
 			if (ret != TRACE_TYPE_NO_CONSUME)
 				trace_consume(&iter);
 		}
+		touch_nmi_watchdog();
 
 		trace_printk_seq(&iter.seq);
 	}
@@ -4863,6 +4974,8 @@
 		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
+	if (global_trace.buffer_disabled)
+		tracing_off();
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
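
With tracing_on()/tracing_off() now backed by the per-buffer switch above, the usual "freeze the trace at the point of failure" pattern still works from any kernel code; my_fault_detected() is a placeholder:

	if (my_fault_detected()) {
		tracing_off();	/* preserve the events leading up to the fault */
		pr_err("fault hit, tracing_is_on()=%d\n", tracing_is_on());
	}
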
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 54faec7..95059f0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -154,6 +154,7 @@
 	struct ring_buffer	*buffer;
 	unsigned long		entries;
 	int			cpu;
+	int			buffer_disabled;
 	cycle_t			time_start;
 	struct task_struct	*waiter;
 	struct trace_array_cpu	*data[NR_CPUS];
@@ -835,13 +836,11 @@
 		     filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_PERF_EVENTS
 #ifdef CONFIG_FUNCTION_TRACER
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
 #endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index d91eb05..4108e12 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -166,6 +166,12 @@
 
 #define FTRACE_STACK_ENTRIES	8
 
+#ifndef CONFIG_64BIT
+# define IP_FMT "%08lx"
+#else
+# define IP_FMT "%016lx"
+#endif
+
 FTRACE_ENTRY(kernel_stack, stack_entry,
 
 	TRACE_STACK,
@@ -175,8 +181,9 @@
 		__dynamic_array(unsigned long,	caller	)
 	),
 
-	F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
-		 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+	F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+		 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+		 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
 		 __entry->caller[0], __entry->caller[1], __entry->caller[2],
 		 __entry->caller[3], __entry->caller[4], __entry->caller[5],
 		 __entry->caller[6], __entry->caller[7]),
@@ -193,8 +200,9 @@
 		__array(	unsigned long,	caller, FTRACE_STACK_ENTRIES	)
 	),
 
-	F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
-		 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+	F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+		 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+		 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
 		 __entry->caller[0], __entry->caller[1], __entry->caller[2],
 		 __entry->caller[3], __entry->caller[4], __entry->caller[5],
 		 __entry->caller[6], __entry->caller[7]),
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7b46c9b..3dd15e8 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -162,7 +162,7 @@
 #define __dynamic_array(type, item)
 
 #undef F_printk
-#define F_printk(fmt, args...) #fmt ", "  __stringify(args)
+#define F_printk(fmt, args...) __stringify(fmt) ", "  __stringify(args)
 
 #undef FTRACE_ENTRY_REG
 #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b8ce6f4..cd65cb1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2791,6 +2791,7 @@
 	 * so no worry about deadlock.
 	 */
 	page = pte_page(entry);
+	get_page(page);
 	if (page != pagecache_page)
 		lock_page(page);
 
@@ -2822,6 +2823,7 @@
 	}
 	if (page != pagecache_page)
 		unlock_page(page);
+	put_page(page);
 
 out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7d698df..a7165a6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2165,7 +2165,7 @@
 	if (action == CPU_ONLINE)
 		return NOTIFY_OK;
 
-	if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
 
 	for_each_mem_cgroup(iter)
@@ -3763,7 +3763,7 @@
 			goto try_to_free;
 		cond_resched();
 	/* "ret" should also be checked to ensure all lists are empty. */
-	} while (memcg->res.usage > 0 || ret);
+	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
 out:
 	css_put(&memcg->css);
 	return ret;
@@ -3778,7 +3778,7 @@
 	lru_add_drain_all();
 	/* try to free all pages in this cgroup */
 	shrink = 1;
-	while (nr_retries && memcg->res.usage > 0) {
+	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
 		int progress;
 
 		if (signal_pending(current)) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33c332b..1a51868 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2107,12 +2107,7 @@
 		 * with multiple processes reclaiming pages, the total
 		 * freeing target can get unreasonably large.
 		 */
-		if (nr_reclaimed >= nr_to_reclaim)
-			nr_to_reclaim = 0;
-		else
-			nr_to_reclaim -= nr_reclaimed;
-
-		if (!nr_to_reclaim && priority < DEF_PRIORITY)
+		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
 			break;
 	}
 	blk_finish_plug(&plug);
diff --git a/net/802/garp.c b/net/802/garp.c
index 8e21b6d..a5c2248 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -167,7 +167,8 @@
 	return NULL;
 }
 
-static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new)
+static struct garp_attr *garp_attr_create(struct garp_applicant *app,
+					  const void *data, u8 len, u8 type)
 {
 	struct rb_node *parent = NULL, **p = &app->gid.rb_node;
 	struct garp_attr *attr;
@@ -176,21 +177,16 @@
 	while (*p) {
 		parent = *p;
 		attr = rb_entry(parent, struct garp_attr, node);
-		d = garp_attr_cmp(attr, new->data, new->dlen, new->type);
+		d = garp_attr_cmp(attr, data, len, type);
 		if (d < 0)
 			p = &parent->rb_left;
 		else if (d > 0)
 			p = &parent->rb_right;
+		else {
+			/* The attribute already exists; re-use it. */
+			return attr;
+		}
 	}
-	rb_link_node(&new->node, parent, p);
-	rb_insert_color(&new->node, &app->gid);
-}
-
-static struct garp_attr *garp_attr_create(struct garp_applicant *app,
-					  const void *data, u8 len, u8 type)
-{
-	struct garp_attr *attr;
-
 	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
 	if (!attr)
 		return attr;
@@ -198,7 +194,9 @@
 	attr->type  = type;
 	attr->dlen  = len;
 	memcpy(attr->data, data, len);
-	garp_attr_insert(app, attr);
+
+	rb_link_node(&attr->node, parent, p);
+	rb_insert_color(&attr->node, &app->gid);
 	return attr;
 }
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e33af63..92a857e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -665,6 +665,11 @@
 
 	hci_req_lock(hdev);
 
+	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
+		ret = -ENODEV;
+		goto done;
+	}
+
 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
 		ret = -ERFKILL;
 		goto done;
@@ -1849,6 +1854,8 @@
 
 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
+	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+
 	write_lock(&hci_dev_list_lock);
 	list_del(&hdev->list);
 	write_unlock(&hci_dev_list_lock);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b8e17e4..94552b3 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1308,6 +1308,7 @@
 	if (chan->retry_count >= chan->remote_max_tx) {
 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
 		l2cap_chan_unlock(chan);
+		l2cap_chan_put(chan);
 		return;
 	}
 
@@ -1316,6 +1317,7 @@
 
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	l2cap_chan_unlock(chan);
+	l2cap_chan_put(chan);
 }
 
 static void l2cap_retrans_timeout(struct work_struct *work)
@@ -1335,6 +1337,7 @@
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 
 	l2cap_chan_unlock(chan);
+	l2cap_chan_put(chan);
 }
 
 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index c4fe583..29122ed 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -82,7 +82,7 @@
 	}
 
 	if (la.l2_cid)
-		err = l2cap_add_scid(chan, la.l2_cid);
+		err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
 	else
 		err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
 
@@ -123,7 +123,8 @@
 	if (la.l2_cid && la.l2_psm)
 		return -EINVAL;
 
-	err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
+	err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
+				&la.l2_bdaddr);
 	if (err)
 		return err;
 
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7fcff88..4ef275c 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2523,13 +2523,18 @@
 
 	if (cp->val) {
 		type = PAGE_SCAN_TYPE_INTERLACED;
-		acp.interval = 0x0024;	/* 22.5 msec page scan interval */
+
+		/* 22.5 msec page scan interval */
+		acp.interval = __constant_cpu_to_le16(0x0024);
 	} else {
 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
-		acp.interval = 0x0800;	/* default 1.28 sec page scan */
+
+		/* default 1.28 sec page scan */
+		acp.interval = __constant_cpu_to_le16(0x0800);
 	}
 
-	acp.window = 0x0012;	/* default 11.25 msec page scan window */
+	/* default 11.25 msec page scan window */
+	acp.window = __constant_cpu_to_le16(0x0012);
 
 	err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
 			   &acp);
@@ -2936,7 +2941,7 @@
 					  name, name_len);
 
 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
-		eir_len = eir_append_data(&ev->eir[eir_len], eir_len,
+		eir_len = eir_append_data(ev->eir, eir_len,
 					  EIR_CLASS_OF_DEV, dev_class, 3);
 
 	put_unaligned_le16(eir_len, &ev->eir_len);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 702a1ae..27ca25e 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -241,7 +241,6 @@
 	hlist_del_rcu(&mp->hlist[mdb->ver]);
 	mdb->size--;
 
-	del_timer(&mp->query_timer);
 	call_rcu_bh(&mp->rcu, br_multicast_free_group);
 
 out:
@@ -271,7 +270,6 @@
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
-		del_timer(&p->query_timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
 		if (!mp->ports && !mp->mglist &&
@@ -507,74 +505,6 @@
 	return NULL;
 }
 
-static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
-{
-	struct net_bridge *br = mp->br;
-	struct sk_buff *skb;
-
-	skb = br_multicast_alloc_query(br, &mp->addr);
-	if (!skb)
-		goto timer;
-
-	netif_rx(skb);
-
-timer:
-	if (++mp->queries_sent < br->multicast_last_member_count)
-		mod_timer(&mp->query_timer,
-			  jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_group_query_expired(unsigned long data)
-{
-	struct net_bridge_mdb_entry *mp = (void *)data;
-	struct net_bridge *br = mp->br;
-
-	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || !mp->mglist ||
-	    mp->queries_sent >= br->multicast_last_member_count)
-		goto out;
-
-	br_multicast_send_group_query(mp);
-
-out:
-	spin_unlock(&br->multicast_lock);
-}
-
-static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
-{
-	struct net_bridge_port *port = pg->port;
-	struct net_bridge *br = port->br;
-	struct sk_buff *skb;
-
-	skb = br_multicast_alloc_query(br, &pg->addr);
-	if (!skb)
-		goto timer;
-
-	br_deliver(port, skb);
-
-timer:
-	if (++pg->queries_sent < br->multicast_last_member_count)
-		mod_timer(&pg->query_timer,
-			  jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_port_group_query_expired(unsigned long data)
-{
-	struct net_bridge_port_group *pg = (void *)data;
-	struct net_bridge_port *port = pg->port;
-	struct net_bridge *br = port->br;
-
-	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
-	    pg->queries_sent >= br->multicast_last_member_count)
-		goto out;
-
-	br_multicast_send_port_group_query(pg);
-
-out:
-	spin_unlock(&br->multicast_lock);
-}
-
 static struct net_bridge_mdb_entry *br_multicast_get_group(
 	struct net_bridge *br, struct net_bridge_port *port,
 	struct br_ip *group, int hash)
@@ -690,8 +620,6 @@
 	mp->addr = *group;
 	setup_timer(&mp->timer, br_multicast_group_expired,
 		    (unsigned long)mp);
-	setup_timer(&mp->query_timer, br_multicast_group_query_expired,
-		    (unsigned long)mp);
 
 	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
 	mdb->size++;
@@ -746,8 +674,6 @@
 	hlist_add_head(&p->mglist, &port->mglist);
 	setup_timer(&p->timer, br_multicast_port_group_expired,
 		    (unsigned long)p);
-	setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
-		    (unsigned long)p);
 
 	rcu_assign_pointer(*pp, p);
 
@@ -1291,9 +1217,6 @@
 		     time_after(mp->timer.expires, time) :
 		     try_to_del_timer_sync(&mp->timer) >= 0)) {
 			mod_timer(&mp->timer, time);
-
-			mp->queries_sent = 0;
-			mod_timer(&mp->query_timer, now);
 		}
 
 		goto out;
@@ -1310,9 +1233,6 @@
 		     time_after(p->timer.expires, time) :
 		     try_to_del_timer_sync(&p->timer) >= 0)) {
 			mod_timer(&p->timer, time);
-
-			p->queries_sent = 0;
-			mod_timer(&p->query_timer, now);
 		}
 
 		break;
@@ -1681,7 +1601,6 @@
 		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
 					  hlist[ver]) {
 			del_timer(&mp->timer);
-			del_timer(&mp->query_timer);
 			call_rcu_bh(&mp->rcu, br_multicast_free_group);
 		}
 	}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0b67a63..e1d8822 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,9 +82,7 @@
 	struct hlist_node		mglist;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
-	struct timer_list		query_timer;
 	struct br_ip			addr;
-	u32				queries_sent;
 };
 
 struct net_bridge_mdb_entry
@@ -94,10 +92,8 @@
 	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
-	struct timer_list		query_timer;
 	struct br_ip			addr;
 	bool				mglist;
-	u32				queries_sent;
 };
 
 struct net_bridge_mdb_htable
diff --git a/net/core/dev.c b/net/core/dev.c
index 5d59155..c25d453 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1596,6 +1596,7 @@
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
+	skb->skb_iif = 0;
 	skb_set_dev(skb, dev);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
@@ -4027,54 +4028,41 @@
 
 #ifdef CONFIG_PROC_FS
 
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
-
-struct dev_iter_state {
-	struct seq_net_private p;
-	unsigned int pos; /* bucket << BUCKET_SPACE + offset */
-};
+#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
 
 #define get_bucket(x) ((x) >> BUCKET_SPACE)
 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
 
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
 {
-	struct dev_iter_state *state = seq->private;
 	struct net *net = seq_file_net(seq);
 	struct net_device *dev;
 	struct hlist_node *p;
 	struct hlist_head *h;
-	unsigned int count, bucket, offset;
+	unsigned int count = 0, offset = get_offset(*pos);
 
-	bucket = get_bucket(state->pos);
-	offset = get_offset(state->pos);
-	h = &net->dev_name_head[bucket];
-	count = 0;
+	h = &net->dev_name_head[get_bucket(*pos)];
 	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
-		if (count++ == offset) {
-			state->pos = set_bucket_offset(bucket, count);
+		if (++count == offset)
 			return dev;
-		}
 	}
 
 	return NULL;
 }
 
-static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
 {
-	struct dev_iter_state *state = seq->private;
 	struct net_device *dev;
 	unsigned int bucket;
 
-	bucket = get_bucket(state->pos);
 	do {
-		dev = dev_from_same_bucket(seq);
+		dev = dev_from_same_bucket(seq, pos);
 		if (dev)
 			return dev;
 
-		bucket++;
-		state->pos = set_bucket_offset(bucket, 0);
+		bucket = get_bucket(*pos) + 1;
+		*pos = set_bucket_offset(bucket, 1);
 	} while (bucket < NETDEV_HASHENTRIES);
 
 	return NULL;
@@ -4087,33 +4075,20 @@
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(RCU)
 {
-	struct dev_iter_state *state = seq->private;
-
 	rcu_read_lock();
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
-	/* check for end of the hash */
-	if (state->pos == 0 && *pos > 1)
+	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
 		return NULL;
 
-	return dev_from_new_bucket(seq);
+	return dev_from_bucket(seq, pos);
 }
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net_device *dev;
-
 	++*pos;
-
-	if (v == SEQ_START_TOKEN)
-		return dev_from_new_bucket(seq);
-
-	dev = dev_from_same_bucket(seq);
-	if (dev)
-		return dev;
-
-	return dev_from_new_bucket(seq);
+	return dev_from_bucket(seq, pos);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4212,13 +4187,7 @@
 static int dev_seq_open(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &dev_seq_ops,
-			    sizeof(struct dev_iter_state));
-}
-
-int dev_seq_open_ops(struct inode *inode, struct file *file,
-		     const struct seq_operations *ops)
-{
-	return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_seq_fops = {
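
The seq_file position is now self-describing: the hash bucket lives in the high bits and a 1-based offset within the bucket in the low BUCKET_SPACE bits, leaving *pos == 0 free for SEQ_START_TOKEN. An illustrative walk-through, assuming the default NETDEV_HASHBITS of 8 (so BUCKET_SPACE = 23):

	loff_t pos = set_bucket_offset(3, 2);	/* bucket 3, 2nd device */

	get_bucket(pos);	/* -> 3 */
	get_offset(pos);	/* -> 2 */

	/* moving past the end of a bucket restarts at offset 1 of the next one */
	pos = set_bucket_offset(get_bucket(pos) + 1, 1);
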
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 29c07fe..626698f 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,7 +696,8 @@
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-	return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
+	return seq_open_net(inode, file, &dev_mc_seq_ops,
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/filter.c b/net/core/filter.c
index cf4989a..6f755cc 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,8 +39,11 @@
 #include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 
-/* No hurry in this branch */
-static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
+/* No hurry in this branch
+ *
+ * Exported for the bpf jit load helper.
+ */
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
 {
 	u8 *ptr = NULL;
 
@@ -59,7 +62,7 @@
 {
 	if (k >= 0)
 		return skb_header_pointer(skb, k, size, buffer);
-	return __load_pointer(skb, k, size);
+	return bpf_internal_load_pointer_neg_helper(skb, k, size);
 }
 
 /**
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f223cdc..e598400 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -952,9 +952,11 @@
 		goto adjust_others;
 	}
 
-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+		       gfp_mask);
 	if (!data)
 		goto nodata;
+	size = SKB_WITH_OVERHEAD(ksize(data));
 
 	/* Copy only real data... and, alas, header. This should be
 	 * optimized for the cases when header is void.
@@ -3161,6 +3163,8 @@
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
+	int len = skb->len;
+
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf)
 		return -ENOMEM;
@@ -3175,7 +3179,7 @@
 
 	skb_queue_tail(&sk->sk_error_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk, len);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21..cf73cc7 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -74,16 +74,24 @@
 
 	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
 	if (iph == NULL)
-		return -NF_DROP;
+		return -NF_ACCEPT;
 
 	/* Conntrack defragments packets, we might still see fragments
 	 * inside ICMP packets though. */
 	if (iph->frag_off & htons(IP_OFFSET))
-		return -NF_DROP;
+		return -NF_ACCEPT;
 
 	*dataoff = nhoff + (iph->ihl << 2);
 	*protonum = iph->protocol;
 
+	/* Check bogus IP headers */
+	if (*dataoff > skb->len) {
+		pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
+			 "nhoff %u, ihl %u, skblen %u\n",
+			 nhoff, iph->ihl << 2, skb->len);
+		return -NF_ACCEPT;
+	}
+
 	return NF_ACCEPT;
 }
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4dc1c10..167ea10 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2041,7 +2041,7 @@
 		if (err < 0)
 			goto e_err;
 	}
-	rth = rt_dst_alloc(init_net.loopback_dev,
+	rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
 			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
 	if (!rth)
 		goto e_nobufs;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cfd7edd..8bb6ade 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -701,11 +701,12 @@
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
 		if (sk_wmem_schedule(sk, skb->truesize)) {
+			skb_reserve(skb, sk->sk_prot->max_header);
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
 			 */
-			skb_reserve(skb, skb_tailroom(skb) - size);
+			skb->avail_size = size;
 			return skb;
 		}
 		__kfree_skb(skb);
@@ -860,7 +861,7 @@
 	}
 
 out:
-	if (copied)
+	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	return copied;
 
@@ -995,10 +996,9 @@
 				copy = seglen;
 
 			/* Where to copy to? */
-			if (skb_tailroom(skb) > 0) {
+			if (skb_availroom(skb) > 0) {
 				/* We have some space in skb head. Superb! */
-				if (copy > skb_tailroom(skb))
-					copy = skb_tailroom(skb);
+				copy = min_t(int, copy, skb_availroom(skb));
 				err = skb_add_data_nocache(sk, skb, from, copy);
 				if (err)
 					goto do_fault;
@@ -1452,7 +1452,7 @@
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1667,7 +1667,7 @@
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -3302,8 +3302,7 @@
 
 	tcp_init_mem(&init_net);
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
-	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
-	limit = max(limit, 128UL);
+	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
 	max_share = min(4UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e886e2f..9944c1d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -474,8 +474,11 @@
 		if (!win_dep) {
 			m -= (new_sample >> 3);
 			new_sample += m;
-		} else if (m < new_sample)
-			new_sample = m << 3;
+		} else {
+			m <<= 3;
+			if (m < new_sample)
+				new_sample = m;
+		}
 	} else {
 		/* No previous measure. */
 		new_sample = m << 3;
@@ -5225,7 +5228,7 @@
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3a25cf7..0cb86ce 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1730,7 +1730,7 @@
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 364784a..376b2cf 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2060,7 +2060,7 @@
 		/* Punt if not enough space exists in the first SKB for
 		 * the data in the second
 		 */
-		if (skb->len > skb_tailroom(to))
+		if (skb->len > skb_availroom(to))
 			break;
 
 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 16c33e3..b2869ca 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2044,7 +2044,7 @@
 		if (!delta)
 			pmc->mca_sfcount[sfmode]--;
 		for (j=0; j<i; j++)
-			(void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
+			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
 	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
 		struct ip6_sf_list *psf;
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0..9d4e155 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -78,19 +78,6 @@
 
    Hence the start of any table is given by get_table() below.  */
 
-/* Check for an extension */
-int
-ip6t_ext_hdr(u8 nexthdr)
-{
-	return  (nexthdr == IPPROTO_HOPOPTS)   ||
-		(nexthdr == IPPROTO_ROUTING)   ||
-		(nexthdr == IPPROTO_FRAGMENT)  ||
-		(nexthdr == IPPROTO_ESP)       ||
-		(nexthdr == IPPROTO_AH)        ||
-		(nexthdr == IPPROTO_NONE)      ||
-		(nexthdr == IPPROTO_DSTOPTS);
-}
-
 /* Returns whether matches rule or not. */
 /* Performance critical - called for every packet */
 static inline bool
@@ -2366,7 +2353,6 @@
 EXPORT_SYMBOL(ip6t_register_table);
 EXPORT_SYMBOL(ip6t_unregister_table);
 EXPORT_SYMBOL(ip6t_do_table);
-EXPORT_SYMBOL(ip6t_ext_hdr);
 EXPORT_SYMBOL(ipv6_find_hdr);
 
 module_init(ip6_tables_init);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 496b627..3992e26 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -881,6 +881,16 @@
 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
 }
 
+static struct dst_entry *ip6_route_input_lookup(struct net *net,
+						struct net_device *dev,
+						struct flowi6 *fl6, int flags)
+{
+	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
+		flags |= RT6_LOOKUP_F_IFACE;
+
+	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
+}
+
 void ip6_route_input(struct sk_buff *skb)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -895,10 +905,7 @@
 		.flowi6_proto = iph->nexthdr,
 	};
 
-	if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
-		flags |= RT6_LOOKUP_F_IFACE;
-
-	skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));
+	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
 }
 
 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
@@ -2537,7 +2544,7 @@
 	struct sk_buff *skb;
 	struct rtmsg *rtm;
 	struct flowi6 fl6;
-	int err, iif = 0;
+	int err, iif = 0, oif = 0;
 
 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
 	if (err < 0)
@@ -2564,15 +2571,29 @@
 		iif = nla_get_u32(tb[RTA_IIF]);
 
 	if (tb[RTA_OIF])
-		fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]);
+		oif = nla_get_u32(tb[RTA_OIF]);
 
 	if (iif) {
 		struct net_device *dev;
+		int flags = 0;
+
 		dev = __dev_get_by_index(net, iif);
 		if (!dev) {
 			err = -ENODEV;
 			goto errout;
 		}
+
+		fl6.flowi6_iif = iif;
+
+		if (!ipv6_addr_any(&fl6.saddr))
+			flags |= RT6_LOOKUP_F_HAS_SADDR;
+
+		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
+							       flags);
+	} else {
+		fl6.flowi6_oif = oif;
+
+		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
 	}
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
@@ -2587,7 +2608,6 @@
 	skb_reset_mac_header(skb);
 	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
 
-	rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6);
 	skb_dst_set(skb, &rt->dst);
 
 	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12c6ece..86cfe60 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1645,7 +1645,7 @@
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 1068f66..64d3ce5 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -49,6 +49,8 @@
 		container_of(h, struct tid_ampdu_rx, rcu_head);
 	int i;
 
+	del_timer_sync(&tid_rx->reorder_timer);
+
 	for (i = 0; i < tid_rx->buf_size; i++)
 		dev_kfree_skb(tid_rx->reorder_buf[i]);
 	kfree(tid_rx->reorder_buf);
@@ -91,7 +93,6 @@
 				     tid, WLAN_BACK_RECIPIENT, reason);
 
 	del_timer_sync(&tid_rx->session_timer);
-	del_timer_sync(&tid_rx->reorder_timer);
 
 	call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
 }
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index cc5b7a6..778e591 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -15,12 +15,6 @@
 #include "rate.h"
 #include "debugfs.h"
 
-int mac80211_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 #define DEBUGFS_FORMAT_BUFFER_SIZE 100
 
 int mac80211_format_buffer(char __user *userbuf, size_t count,
@@ -50,7 +44,7 @@
 #define DEBUGFS_READONLY_FILE_OPS(name)			\
 static const struct file_operations name## _ops = {			\
 	.read = name## _read,						\
-	.open = mac80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
@@ -93,7 +87,7 @@
 
 static const struct file_operations reset_ops = {
 	.write = reset_write,
-	.open = mac80211_open_file_generic,
+	.open = simple_open,
 	.llseek = noop_llseek,
 };
 
@@ -254,7 +248,7 @@
 									\
 static const struct file_operations stats_ ##name## _ops = {		\
 	.read = stats_ ##name## _read,					\
-	.open = mac80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 7c87529..9be4e6d 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -3,7 +3,6 @@
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 extern void debugfs_hw_add(struct ieee80211_local *local);
-extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
 extern int mac80211_format_buffer(char __user *userbuf, size_t count,
 				  loff_t *ppos, char *fmt, ...);
 #else
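
The mac80211 debugfs conversions above (and the cfg80211 ones further down)
rely on the generic helper in fs/libfs.c that this release introduces. As a
reference, a minimal sketch of what simple_open() does:

	/* Sketch of the fs/libfs.c helper: publish the inode's private
	 * data to the file, exactly what the removed per-subsystem
	 * open callbacks were open-coding. */
	int simple_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}
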
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 59edcd9..7932767 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -30,7 +30,7 @@
 #define KEY_OPS(name)							\
 static const struct file_operations key_ ##name## _ops = {		\
 	.read = key_##name##_read,					\
-	.open = mac80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 }
 
@@ -45,7 +45,7 @@
 #define KEY_CONF_OPS(name)						\
 static const struct file_operations key_ ##name## _ops = {		\
 	.read = key_conf_##name##_read,					\
-	.open = mac80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 }
 
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index a32eeda..30f99c3 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -135,7 +135,7 @@
 static const struct file_operations name##_ops = {			\
 	.read = ieee80211_if_read_##name,				\
 	.write = (_write),						\
-	.open = mac80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 }
 
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 6d45804..832b2da 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -33,7 +33,7 @@
 #define STA_OPS(name)							\
 static const struct file_operations sta_ ##name## _ops = {		\
 	.read = sta_##name##_read,					\
-	.open = mac80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 }
 
@@ -41,7 +41,7 @@
 static const struct file_operations sta_ ##name## _ops = {		\
 	.read = sta_##name##_read,					\
 	.write = sta_##name##_write,					\
-	.open = mac80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 }
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b581a24..1633648 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -102,9 +102,6 @@
 
 	might_sleep();
 
-	/* If this off-channel logic ever changes,  ieee80211_on_oper_channel
-	 * may need to change as well.
-	 */
 	offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
 	if (local->scan_channel) {
 		chan = local->scan_channel;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 576fb25..f76da5b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3387,8 +3387,7 @@
 		 */
 		printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
 		       sdata->name, ifmgd->bssid);
-		assoc_data->timeout = jiffies +
-				TU_TO_EXP_TIME(req->bss->beacon_interval);
+		assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
 	} else {
 		assoc_data->have_beacon = true;
 		assoc_data->sent_assoc = false;
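
The dropped "jiffies +" removes a doubled offset: TU_TO_EXP_TIME() already
yields an absolute expiry. Roughly, per net/mac80211/ieee80211_i.h:

	#define TU_TO_JIFFIES(x)	(usecs_to_jiffies((x) * 1024))
	#define TU_TO_EXP_TIME(x)	(jiffies + TU_TO_JIFFIES(x))

so adding jiffies again at the call site effectively pushed the beacon-wait
timeout far into the future.
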
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b4f7600..3313c11 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -145,7 +145,7 @@
 
 static const struct file_operations rcname_ops = {
 	.read = rcname_read,
-	.open = mac80211_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 #endif
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 33cd169..c70e176 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -370,7 +370,7 @@
 	 */
 	drv_sw_scan_start(local);
 
-	local->leave_oper_channel_time = 0;
+	local->leave_oper_channel_time = jiffies;
 	local->next_scan_state = SCAN_DECISION;
 	local->scan_channel_idx = 0;
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cbdb754..729f157 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -735,6 +735,7 @@
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
+	atomic_dec(&net->ct.count);
 	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
 	return ERR_PTR(-ENOMEM);
 #endif
@@ -1591,7 +1592,7 @@
 	return 0;
 
 err_timeout:
-	nf_conntrack_timeout_fini(net);
+	nf_conntrack_ecache_fini(net);
 err_ecache:
 	nf_conntrack_tstamp_fini(net);
 err_tstamp:
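
Both conntrack fixes above are error-path bookkeeping: the ZONES out_free
path now drops the ct.count reference taken earlier in the same allocator,
and the init error ladder unwinds the last step that actually succeeded. The
usual shape of such a ladder, sketched with hypothetical setup_*/teardown_*
names:

	ret = setup_a();
	if (ret < 0)
		goto err_a;
	ret = setup_b();
	if (ret < 0)
		goto err_b;
	return 0;
err_b:
	teardown_a();	/* roll back the step that succeeded, not b itself */
err_a:
	return ret;
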
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 361eade..0d07a1d 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -584,8 +584,8 @@
 			 * Let's try to use the data from the packet.
 			 */
 			sender->td_end = end;
-			win <<= sender->td_scale;
-			sender->td_maxwin = (win == 0 ? 1 : win);
+			swin = win << sender->td_scale;
+			sender->td_maxwin = (swin == 0 ? 1 : swin);
 			sender->td_maxend = end + sender->td_maxwin;
 			/*
 			 * We haven't seen traffic in the other direction yet
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 3eb348b..d98c868 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
+#include <linux/atomic.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
 #include <linux/slab.h>
@@ -17,7 +18,6 @@
 #include <linux/errno.h>
 #include <net/netlink.h>
 #include <net/sock.h>
-#include <asm/atomic.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter/nfnetlink.h>
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0c8e438..59530e9 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -150,6 +150,17 @@
 	return ret;
 }
 
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
+{
+	typeof(nf_ct_timeout_put_hook) timeout_put;
+
+	timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
+	if (timeout_put)
+		timeout_put(timeout);
+}
+#endif
+
 static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 {
 	struct xt_ct_target_info_v1 *info = par->targinfo;
@@ -158,7 +169,9 @@
 	struct nf_conn *ct;
 	int ret = 0;
 	u8 proto;
-
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+	struct ctnl_timeout *timeout;
+#endif
 	if (info->flags & ~XT_CT_NOTRACK)
 		return -EINVAL;
 
@@ -216,7 +229,6 @@
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 	if (info->timeout) {
 		typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
-		struct ctnl_timeout *timeout;
 		struct nf_conn_timeout *timeout_ext;
 
 		rcu_read_lock();
@@ -245,7 +257,7 @@
 				pr_info("Timeout policy `%s' can only be "
 					"used by L3 protocol number %d\n",
 					info->timeout, timeout->l3num);
-				goto err4;
+				goto err5;
 			}
 			/* Make sure the timeout policy matches any existing
 			 * protocol tracker, otherwise default to generic.
@@ -258,13 +270,13 @@
 					"used by L4 protocol number %d\n",
 					info->timeout,
 					timeout->l4proto->l4proto);
-				goto err4;
+				goto err5;
 			}
 			timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
-							    GFP_KERNEL);
+							    GFP_ATOMIC);
 			if (timeout_ext == NULL) {
 				ret = -ENOMEM;
-				goto err4;
+				goto err5;
 			}
 		} else {
 			ret = -ENOENT;
@@ -281,8 +293,12 @@
 	info->ct = ct;
 	return 0;
 
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+err5:
+	__xt_ct_tg_timeout_put(timeout);
 err4:
 	rcu_read_unlock();
+#endif
 err3:
 	nf_conntrack_free(ct);
 err2:
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 32bb753..faa48f7 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -829,12 +829,19 @@
 	return 0;
 }
 
-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
 	int len = skb->len;
 
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk, len);
+	return len;
+}
+
+int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+{
+	int len = __netlink_sendskb(sk, skb);
+
 	sock_put(sk);
 	return len;
 }
@@ -957,8 +964,7 @@
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 	    !test_bit(0, &nlk->state)) {
 		skb_set_owner_r(skb, sk);
-		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		__netlink_sendskb(sk, skb);
 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
 	}
 	return -1;
@@ -1698,10 +1704,8 @@
 
 		if (sk_filter(sk, skb))
 			kfree_skb(skb);
-		else {
-			skb_queue_tail(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
-		}
+		else
+			__netlink_sendskb(sk, skb);
 		return 0;
 	}
 
@@ -1715,10 +1719,8 @@
 
 	if (sk_filter(sk, skb))
 		kfree_skb(skb);
-	else {
-		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
-	}
+	else
+		__netlink_sendskb(sk, skb);
 
 	if (cb->done)
 		cb->done(cb);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 7b76eb7..ef10ffc 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -474,7 +474,7 @@
 
 	while (remaining_len > 0) {
 
-		frag_len = min_t(u16, local->remote_miu, remaining_len);
+		frag_len = min_t(size_t, local->remote_miu, remaining_len);
 
 		pr_debug("Fragment %zd bytes remaining %zd",
 			 frag_len, remaining_len);
@@ -497,7 +497,7 @@
 		release_sock(sk);
 
 		remaining_len -= frag_len;
-		msg_ptr += len;
+		msg_ptr += frag_len;
 	}
 
 	kfree(msg_data);
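
Both hunks keep the fragmentation loop self-consistent: the fragment size is
clamped in the width of remaining_len, and the read pointer advances by the
bytes actually consumed rather than by the original total. A sketch of the
intended loop shape, where send_frag() is a hypothetical stand-in for the skb
building and queueing done above:

	while (remaining_len > 0) {
		/* compare as size_t so a large remaining_len is not truncated */
		size_t frag_len = min_t(size_t, local->remote_miu, remaining_len);

		send_frag(msg_ptr, frag_len);

		remaining_len -= frag_len;
		msg_ptr += frag_len;	/* advance by what was just sent */
	}
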
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9f60008..9726fe6 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1130,6 +1130,9 @@
 	int flags = msg->msg_flags;
 	int err, done;
 
+	if (len > USHRT_MAX)
+		return -EMSGSIZE;
+
 	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
 				MSG_CMSG_COMPAT)) ||
 			!(msg->msg_flags & MSG_EOR))
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 1ab8689..906cc05 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -95,11 +95,11 @@
 	struct sockaddr *sa = addr;
 	int err;
 
-	if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len))
+	if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
 		return 0;
 
 	if (dev->flags & IFF_UP) {
-		err = rose_add_loopback_node((rose_address *)dev->dev_addr);
+		err = rose_add_loopback_node((rose_address *)sa->sa_data);
 		if (err)
 			return err;
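
The original check could never take the early return: memcpy() returns its
destination pointer, so !memcpy(...) is always false. The fix compares
instead, and registers the loopback node from the new address in sa->sa_data
rather than from dev->dev_addr. The corrected guard in isolation:

	/* memcmp() == 0 means the addresses are identical, nothing to do */
	if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
		return 0;
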
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 06b42b7..92ba71d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4133,9 +4133,10 @@
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
 				  int __user *optlen)
 {
-	if (len < sizeof(struct sctp_event_subscribe))
+	if (len <= 0)
 		return -EINVAL;
-	len = sizeof(struct sctp_event_subscribe);
+	if (len > sizeof(struct sctp_event_subscribe))
+		len = sizeof(struct sctp_event_subscribe);
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
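
With this change a caller may pass an option buffer shorter than
struct sctp_event_subscribe (for example one built against older headers)
and still read the leading event flags; only non-positive lengths are
rejected and oversized ones are clamped. A hypothetical userspace sketch,
assuming the lksctp-tools <netinet/sctp.h> header:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/sctp.h>	/* struct sctp_event_subscribe, SCTP_EVENTS */

	static int read_events(int fd, struct sctp_event_subscribe *ev)
	{
		socklen_t len = sizeof(*ev);	/* may legitimately be smaller now */

		memset(ev, 0, sizeof(*ev));
		return getsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, ev, &len);
	}
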
diff --git a/net/socket.c b/net/socket.c
index 484cc69..851edcd 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -811,9 +811,9 @@
 
 	sock = file->private_data;
 
-	flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
-	if (more)
-		flags |= MSG_MORE;
+	flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+	/* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
+	flags |= more;
 
 	return kernel_sendpage(sock, page, offset, size, flags);
 }
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 39765bc..920cabe 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -13,12 +13,6 @@
 #include "core.h"
 #include "debugfs.h"
 
-static int cfg80211_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 #define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...)		\
 static ssize_t name## _read(struct file *file, char __user *userbuf,	\
 			    size_t count, loff_t *ppos)			\
@@ -33,7 +27,7 @@
 									\
 static const struct file_operations name## _ops = {			\
 	.read = name## _read,						\
-	.open = cfg80211_open_file_generic,				\
+	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
 
@@ -102,7 +96,7 @@
 
 static const struct file_operations ht40allow_map_ops = {
 	.read = ht40allow_map_read,
-	.open = cfg80211_open_file_generic,
+	.open = simple_open,
 	.llseek = default_llseek,
 };
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4c1eb94..f432c57 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1294,6 +1294,11 @@
 			goto bad_res;
 		}
 
+		if (!netif_running(netdev)) {
+			result = -ENETDOWN;
+			goto bad_res;
+		}
+
 		nla_for_each_nested(nl_txq_params,
 				    info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
 				    rem_txq_params) {
@@ -2386,7 +2391,9 @@
 }
 
 static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
-				int flags, struct net_device *dev,
+				int flags,
+				struct cfg80211_registered_device *rdev,
+				struct net_device *dev,
 				const u8 *mac_addr, struct station_info *sinfo)
 {
 	void *hdr;
@@ -2425,12 +2432,18 @@
 	if (sinfo->filled & STATION_INFO_PLINK_STATE)
 		NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
 			    sinfo->plink_state);
-	if (sinfo->filled & STATION_INFO_SIGNAL)
-		NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
-			   sinfo->signal);
-	if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
-		NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
-			   sinfo->signal_avg);
+	switch (rdev->wiphy.signal_type) {
+	case CFG80211_SIGNAL_TYPE_MBM:
+		if (sinfo->filled & STATION_INFO_SIGNAL)
+			NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
+				   sinfo->signal);
+		if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
+			NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
+				   sinfo->signal_avg);
+		break;
+	default:
+		break;
+	}
 	if (sinfo->filled & STATION_INFO_TX_BITRATE) {
 		if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
 					  NL80211_STA_INFO_TX_BITRATE))
@@ -2523,7 +2536,7 @@
 		if (nl80211_send_station(skb,
 				NETLINK_CB(cb->skb).pid,
 				cb->nlh->nlmsg_seq, NLM_F_MULTI,
-				netdev, mac_addr,
+				dev, netdev, mac_addr,
 				&sinfo) < 0)
 			goto out;
 
@@ -2568,7 +2581,7 @@
 		return -ENOMEM;
 
 	if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0,
-				 dev, mac_addr, &sinfo) < 0) {
+				 rdev, dev, mac_addr, &sinfo) < 0) {
 		nlmsg_free(msg);
 		return -ENOBUFS;
 	}
@@ -6376,7 +6389,7 @@
 		.doit = nl80211_get_key,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6408,7 +6421,7 @@
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.doit = nl80211_set_beacon,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6416,7 +6429,7 @@
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.doit = nl80211_start_ap,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6424,7 +6437,7 @@
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.doit = nl80211_stop_ap,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6440,7 +6453,7 @@
 		.doit = nl80211_set_station,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6456,7 +6469,7 @@
 		.doit = nl80211_del_station,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6489,7 +6502,7 @@
 		.doit = nl80211_del_mpath,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6497,7 +6510,7 @@
 		.doit = nl80211_set_bss,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6523,7 +6536,7 @@
 		.doit = nl80211_get_mesh_config,
 		.policy = nl80211_policy,
 		/* can be retrieved by unprivileged users */
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6656,7 +6669,7 @@
 		.doit = nl80211_setdel_pmksa,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6664,7 +6677,7 @@
 		.doit = nl80211_setdel_pmksa,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6672,7 +6685,7 @@
 		.doit = nl80211_flush_pmksa,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6832,7 +6845,7 @@
 		.doit = nl80211_probe_client,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -7596,7 +7609,8 @@
 	if (!msg)
 		return;
 
-	if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
+	if (nl80211_send_station(msg, 0, 0, 0,
+				 rdev, dev, mac_addr, sinfo) < 0) {
 		nlmsg_free(msg);
 		return;
 	}
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 0af7f54..af648e0 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -780,8 +780,10 @@
 		if (cmd == SIOCSIWENCODEEXT) {
 			struct iw_encode_ext *ee = (void *) extra;
 
-			if (iwp->length < sizeof(*ee) + ee->key_len)
-				return -EFAULT;
+			if (iwp->length < sizeof(*ee) + ee->key_len) {
+				err = -EFAULT;
+				goto out;
+			}
 		}
 	}
 
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index d897278..6a3ee98 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -104,7 +104,7 @@
 # Usage: cflags-y += $(call as-instr,instr,option1,option2)
 
 as-instr = $(call try-run,\
-	/bin/echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
+	printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
 
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index d2b366c1..ff1720d 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -69,6 +69,7 @@
 warning-1 += -Wold-style-definition
 warning-1 += $(call cc-option, -Wmissing-include-dirs)
 warning-1 += $(call cc-option, -Wunused-but-set-variable)
+warning-1 += $(call cc-disable-warning, missing-field-initializers)
 
 warning-2 := -Waggregate-return
 warning-2 += -Wcast-align
@@ -76,6 +77,7 @@
 warning-2 += -Wnested-externs
 warning-2 += -Wshadow
 warning-2 += $(call cc-option, -Wlogical-op)
+warning-2 += $(call cc-option, -Wmissing-field-initializers)
 
 warning-3 := -Wbad-function-cast
 warning-3 += -Wcast-qual
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 00c368c..0be6f11 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -304,6 +304,30 @@
 	lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
 	(rm -f $@ ; false)
 
+# U-Boot mkimage
+# ---------------------------------------------------------------------------
+
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+# SRCARCH just happens to match slightly more than ARCH (on sparc), so reduces
+# the number of overrides in arch makefiles
+UIMAGE_ARCH ?= $(SRCARCH)
+UIMAGE_COMPRESSION ?= $(if $(2),$(2),none)
+UIMAGE_OPTS-y ?=
+UIMAGE_TYPE ?= kernel
+UIMAGE_LOADADDR ?= arch_must_set_this
+UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR)
+UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)'
+UIMAGE_IN ?= $<
+UIMAGE_OUT ?= $@
+
+quiet_cmd_uimage = UIMAGE  $(UIMAGE_OUT)
+      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \
+			-C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \
+			-T $(UIMAGE_TYPE) \
+			-a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \
+			-n $(UIMAGE_NAME) -d $(UIMAGE_IN) $(UIMAGE_OUT)
+
 # XZ
 # ---------------------------------------------------------------------------
 # Use xzkern to compress the kernel image and xzmisc to compress other things.
diff --git a/scripts/coccinelle/api/ptr_ret.cocci b/scripts/coccinelle/api/ptr_ret.cocci
new file mode 100644
index 0000000..cbfd08c
--- /dev/null
+++ b/scripts/coccinelle/api/ptr_ret.cocci
@@ -0,0 +1,70 @@
+///
+/// Use PTR_RET rather than if(IS_ERR(...)) + PTR_ERR
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Options: -no_includes -include_headers
+//
+// Keywords: ERR_PTR, PTR_ERR, PTR_RET
+// Version min: 2.6.39
+//
+
+virtual context
+virtual patch
+virtual org
+virtual report
+
+@depends on patch@
+expression ptr;
+@@
+
+- if (IS_ERR(ptr)) return PTR_ERR(ptr); else return 0;
++ return PTR_RET(ptr);
+
+@depends on patch@
+expression ptr;
+@@
+
+- if (IS_ERR(ptr)) return PTR_ERR(ptr); return 0;
++ return PTR_RET(ptr);
+
+@r1 depends on !patch@
+expression ptr;
+position p1;
+@@
+
+* if@p1 (IS_ERR(ptr)) return PTR_ERR(ptr); else return 0;
+
+@r2 depends on !patch@
+expression ptr;
+position p2;
+@@
+
+* if@p2 (IS_ERR(ptr)) return PTR_ERR(ptr); return 0;
+
+@script:python depends on org@
+p << r1.p1;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING: PTR_RET can be used")
+
+
+@script:python depends on org@
+p << r2.p2;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING: PTR_RET can be used")
+
+@script:python depends on report@
+p << r1.p1;
+@@
+
+coccilib.report.print_report(p[0], "WARNING: PTR_RET can be used")
+
+@script:python depends on report@
+p << r2.p2;
+@@
+
+coccilib.report.print_report(p[0], "WARNING: PTR_RET can be used")
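
The rule rewrites the common "return the error or zero" tail into PTR_RET().
In plain C the transformation is:

	/* before */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;

	/* after */
	return PTR_RET(ptr);	/* PTR_ERR(ptr) when IS_ERR(ptr), else 0 */
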
diff --git a/scripts/coccinelle/api/simple_open.cocci b/scripts/coccinelle/api/simple_open.cocci
new file mode 100644
index 0000000..05962f7
--- /dev/null
+++ b/scripts/coccinelle/api/simple_open.cocci
@@ -0,0 +1,70 @@
+/// This removes an open coded simple_open() function
+/// and replaces file operations references to the function
+/// with simple_open() instead.
+///
+// Confidence: High
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual patch
+virtual report
+
+@ open depends on patch @
+identifier open_f != simple_open;
+identifier i, f;
+@@
+-int open_f(struct inode *i, struct file *f)
+-{
+(
+-if (i->i_private)
+-f->private_data = i->i_private;
+|
+-f->private_data = i->i_private;
+)
+-return 0;
+-}
+
+@ has_open depends on open @
+identifier fops;
+identifier open.open_f;
+@@
+struct file_operations fops = {
+...,
+-.open = open_f,
++.open = simple_open,
+...
+};
+
+@ openr depends on report @
+identifier open_f != simple_open;
+identifier i, f;
+position p;
+@@
+int open_f@p(struct inode *i, struct file *f)
+{
+(
+if (i->i_private)
+f->private_data = i->i_private;
+|
+f->private_data = i->i_private;
+)
+return 0;
+}
+
+@ has_openr depends on openr @
+identifier fops;
+identifier openr.open_f;
+position p;
+@@
+struct file_operations fops = {
+...,
+.open = open_f@p,
+...
+};
+
+@script:python@
+pf << openr.p;
+ps << has_openr.p;
+@@
+
+coccilib.report.print_report(pf[0],"WARNING opportunity for simple_open, see also structure on line %s"%(ps[0].line))
diff --git a/scripts/coccinelle/free/clk_put.cocci b/scripts/coccinelle/free/clk_put.cocci
new file mode 100644
index 0000000..46747ad
--- /dev/null
+++ b/scripts/coccinelle/free/clk_put.cocci
@@ -0,0 +1,67 @@
+/// Find missing clk_puts.
+///
+//# This only signals a missing clk_put when there is a clk_put later
+//# in the same function.
+//# False positives can be due to loops.
+//
+// Confidence: Moderate
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options:
+
+virtual context
+virtual org
+virtual report
+
+@clk@
+expression e;
+statement S,S1;
+int ret;
+position p1,p2,p3;
+@@
+
+e = clk_get@p1(...)
+... when != clk_put(e)
+if (<+...e...+>) S
+... when any
+    when != clk_put(e)
+    when != if (...) { ... clk_put(e); ... }
+(
+ if (ret == 0) S1
+|
+if (...)
+   { ...
+     return 0; }
+|
+if (...)
+   { ...
+     return <+...e...+>; }
+|
+*if@p2 (...)
+   { ... when != clk_put(e)
+         when forall
+     return@p3 ...; }
+)
+... when any
+clk_put(e);
+
+@script:python depends on org@
+p1 << clk.p1;
+p2 << clk.p2;
+p3 << clk.p3;
+@@
+
+cocci.print_main("clk_get",p1)
+cocci.print_secs("if",p2)
+cocci.print_secs("needed clk_put",p3)
+
+@script:python depends on report@
+p1 << clk.p1;
+p2 << clk.p2;
+p3 << clk.p3;
+@@
+
+msg = "ERROR: missing clk_put; clk_get on line %s and execution via conditional on line %s" % (p1[0].line,p2[0].line)
+coccilib.report.print_report(p3[0],msg)
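
A sketch of the leak pattern the rule flags, with hypothetical names: the
clock is released on the success path, but an early error return skips the
matching clk_put().

	static int probe_sketch(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = do_setup(dev);	/* hypothetical setup step */
		if (ret)
			return ret;	/* flagged: clk_put(clk) is missing here */

		clk_put(clk);		/* the later clk_put is what arms the rule */
		return 0;
	}
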
diff --git a/scripts/coccinelle/free/iounmap.cocci b/scripts/coccinelle/free/iounmap.cocci
new file mode 100644
index 0000000..5384f4ba
--- /dev/null
+++ b/scripts/coccinelle/free/iounmap.cocci
@@ -0,0 +1,67 @@
+/// Find missing iounmaps.
+///
+//# This only signals a missing iounmap when there is an iounmap later
+//# in the same function.
+//# False positives can be due to loops.
+//
+// Confidence: Moderate
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options:
+
+virtual context
+virtual org
+virtual report
+
+@iom@
+expression e;
+statement S,S1;
+int ret;
+position p1,p2,p3;
+@@
+
+e = \(ioremap@p1\|ioremap_nocache@p1\)(...)
+... when != iounmap(e)
+if (<+...e...+>) S
+... when any
+    when != iounmap(e)
+    when != if (...) { ... iounmap(e); ... }
+(
+ if (ret == 0) S1
+|
+if (...)
+   { ...
+     return 0; }
+|
+if (...)
+   { ...
+     return <+...e...+>; }
+|
+*if@p2 (...)
+   { ... when != iounmap(e)
+         when forall
+     return@p3 ...; }
+)
+... when any
+iounmap(e);
+
+@script:python depends on org@
+p1 << iom.p1;
+p2 << iom.p2;
+p3 << iom.p3;
+@@
+
+cocci.print_main("ioremap",p1)
+cocci.print_secs("if",p2)
+cocci.print_secs("needed iounmap",p3)
+
+@script:python depends on report@
+p1 << iom.p1;
+p2 << iom.p2;
+p3 << iom.p3;
+@@
+
+msg = "ERROR: missing iounmap; ioremap on line %s and execution via conditional on line %s" % (p1[0].line,p2[0].line)
+coccilib.report.print_report(p3[0],msg)
diff --git a/scripts/coccinelle/misc/boolinit.cocci b/scripts/coccinelle/misc/boolinit.cocci
new file mode 100644
index 0000000..97ce41c
--- /dev/null
+++ b/scripts/coccinelle/misc/boolinit.cocci
@@ -0,0 +1,178 @@
+/// Bool initializations should use true and false.  Bool tests don't need
+/// comparisons.  Based on contributions from Joe Perches, Rusty Russell
+/// and Bruce W Allan.
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Options: -include_headers
+
+virtual patch
+virtual context
+virtual org
+virtual report
+
+@depends on patch@
+bool t;
+symbol true;
+symbol false;
+@@
+
+(
+- t == true
++ t
+|
+- true == t
++ t
+|
+- t != true
++ !t
+|
+- true != t
++ !t
+|
+- t == false
++ !t
+|
+- false == t
++ !t
+|
+- t != false
++ t
+|
+- false != t
++ t
+)
+
+@depends on patch disable is_zero, isnt_zero@
+bool t;
+@@
+
+(
+- t == 1
++ t
+|
+- t != 1
++ !t
+|
+- t == 0
++ !t
+|
+- t != 0
++ t
+)
+
+@depends on patch@
+bool b;
+@@
+(
+ b =
+- 0
++ false
+|
+ b =
+- 1
++ true
+)
+
+// ---------------------------------------------------------------------
+
+@r1 depends on !patch@
+bool t;
+position p;
+@@
+
+(
+* t@p == true
+|
+* true == t@p
+|
+* t@p != true
+|
+* true != t@p
+|
+* t@p == false
+|
+* false == t@p
+|
+* t@p != false
+|
+* false != t@p
+)
+
+@r2 depends on !patch disable is_zero, isnt_zero@
+bool t;
+position p;
+@@
+
+(
+* t@p == 1
+|
+* t@p != 1
+|
+* t@p == 0
+|
+* t@p != 0
+)
+
+@r3 depends on !patch@
+bool b;
+position p1,p2;
+constant c;
+@@
+(
+*b@p1 = 0
+|
+*b@p1 = 1
+|
+*b@p2 = c
+)
+
+@script:python depends on org@
+p << r1.p;
+@@
+
+cocci.print_main("WARNING: Comparison to bool",p)
+
+@script:python depends on org@
+p << r2.p;
+@@
+
+cocci.print_main("WARNING: Comparison of bool to 0/1",p)
+
+@script:python depends on org@
+p1 << r3.p1;
+@@
+
+cocci.print_main("WARNING: Assignment of bool to 0/1",p1)
+
+@script:python depends on org@
+p2 << r3.p2;
+@@
+
+cocci.print_main("ERROR: Assignment of bool to non-0/1 constant",p2)
+
+@script:python depends on report@
+p << r1.p;
+@@
+
+coccilib.report.print_report(p[0],"WARNING: Comparison to bool")
+
+@script:python depends on report@
+p << r2.p;
+@@
+
+coccilib.report.print_report(p[0],"WARNING: Comparison of bool to 0/1")
+
+@script:python depends on report@
+p1 << r3.p1;
+@@
+
+coccilib.report.print_report(p1[0],"WARNING: Assignment of bool to 0/1")
+
+@script:python depends on report@
+p2 << r3.p2;
+@@
+
+coccilib.report.print_report(p2[0],"ERROR: Assignment of bool to non-0/1 constant")
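
The same cleanups in plain C, with hypothetical start()/wait_ready() helpers:

	bool ready = false;	/* rather than: bool ready = 0; */

	if (ready)		/* rather than: if (ready == true) */
		start();
	if (!ready)		/* rather than: if (ready == false) or (ready != 1) */
		wait_ready();
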
diff --git a/scripts/coccinelle/misc/cstptr.cocci b/scripts/coccinelle/misc/cstptr.cocci
new file mode 100644
index 0000000..d425644
--- /dev/null
+++ b/scripts/coccinelle/misc/cstptr.cocci
@@ -0,0 +1,41 @@
+/// PTR_ERR should be applied before its argument is reassigned, typically
+/// to NULL
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r exists@
+expression e,e1;
+constant c;
+position p1,p2;
+@@
+
+*e@p1 = c
+... when != e = e1
+    when != &e
+    when != true IS_ERR(e)
+*PTR_ERR@p2(e)
+
+@script:python depends on org@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+cocci.print_main("PTR_ERR",p2)
+cocci.print_secs("assignment",p1)
+
+@script:python depends on report@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+msg = "ERROR: PTR_ERR applied after initialization to constant on line %s" % (p1[0].line)
+coccilib.report.print_report(p2[0],msg)
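
The bug pattern this rule catches, sketched for a pointer obtained from a
hypothetical find_device() lookup that returns an ERR_PTR on failure:

	dev = find_device(name);	/* hypothetical, may return ERR_PTR(-ENODEV) */

	/* flagged: the pointer is reset before PTR_ERR() reads it */
	if (IS_ERR(dev)) {
		dev = NULL;
		return PTR_ERR(dev);	/* now 0, the original error is lost */
	}

	/* correct order: capture the error code first */
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev = NULL;
		return err;
	}
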
diff --git a/scripts/coccinelle/null/badzero.cocci b/scripts/coccinelle/null/badzero.cocci
new file mode 100644
index 0000000..d79baf7
--- /dev/null
+++ b/scripts/coccinelle/null/badzero.cocci
@@ -0,0 +1,237 @@
+/// Compare pointer-typed values to NULL rather than 0
+///
+//# This makes an effort to choose between !x and x == NULL.  !x is used
+//# if it has previously been used with the function used to initialize x.
+//# This relies on type information.  More type information can be obtained
+//# using the option -all_includes and the option -I to specify an
+//# include path.
+//
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options:
+
+virtual patch
+virtual context
+virtual org
+virtual report
+
+@initialize:ocaml@
+let negtable = Hashtbl.create 101
+
+@depends on patch@
+expression *E;
+identifier f;
+@@
+
+(
+  (E = f(...)) ==
+- 0
++ NULL
+|
+  (E = f(...)) !=
+- 0
++ NULL
+|
+- 0
++ NULL
+  == (E = f(...))
+|
+- 0
++ NULL
+  != (E = f(...))
+)
+
+
+@t1 depends on !patch@
+expression *E;
+identifier f;
+position p;
+@@
+
+(
+  (E = f(...)) ==
+* 0@p
+|
+  (E = f(...)) !=
+* 0@p
+|
+* 0@p
+  == (E = f(...))
+|
+* 0@p
+  != (E = f(...))
+)
+
+@script:python depends on org@
+p << t1.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0")
+
+@script:python depends on report@
+p << t1.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING comparing pointer to 0")
+
+// Tests of returned values
+
+@s@
+identifier f;
+expression E,E1;
+@@
+
+ E = f(...)
+ ... when != E = E1
+ !E
+
+@script:ocaml depends on s@
+f << s.f;
+@@
+
+try let _ = Hashtbl.find negtable f in ()
+with Not_found -> Hashtbl.add negtable f ()
+
+@ r disable is_zero,isnt_zero exists @
+expression *E;
+identifier f;
+@@
+
+E = f(...)
+...
+(E == 0
+|E != 0
+|0 == E
+|0 != E
+)
+
+@script:ocaml@
+f << r.f;
+@@
+
+try let _ = Hashtbl.find negtable f in ()
+with Not_found -> include_match false
+
+// This rule may lead to inconsistent path problems, if E is defined in two
+// places
+@ depends on patch disable is_zero,isnt_zero @
+expression *E;
+expression E1;
+identifier r.f;
+@@
+
+E = f(...)
+<...
+(
+- E == 0
++ !E
+|
+- E != 0
++ E
+|
+- 0 == E
++ !E
+|
+- 0 != E
++ E
+)
+...>
+?E = E1
+
+@t2 depends on !patch disable is_zero,isnt_zero @
+expression *E;
+expression E1;
+identifier r.f;
+position p1;
+position p2;
+@@
+
+E = f(...)
+<...
+(
+* E == 0@p1
+|
+* E != 0@p2
+|
+* 0@p1 == E
+|
+* 0@p1 != E
+)
+...>
+?E = E1
+
+@script:python depends on org@
+p << t2.p1;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0, suggest !E")
+
+@script:python depends on org@
+p << t2.p2;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0")
+
+@script:python depends on report@
+p << t2.p1;
+@@
+
+coccilib.report.print_report(p[0], "WARNING comparing pointer to 0, suggest !E")
+
+@script:python depends on report@
+p << t2.p2;
+@@
+
+coccilib.report.print_report(p[0], "WARNING comparing pointer to 0")
+
+@ depends on patch disable is_zero,isnt_zero @
+expression *E;
+@@
+
+(
+  E ==
+- 0
++ NULL
+|
+  E !=
+- 0
++ NULL
+|
+- 0
++ NULL
+  == E
+|
+- 0
++ NULL
+  != E
+)
+
+@ t3 depends on !patch disable is_zero,isnt_zero @
+expression *E;
+position p;
+@@
+
+(
+* E == 0@p
+|
+* E != 0@p
+|
+* 0@p == E
+|
+* 0@p != E
+)
+
+@script:python depends on org@
+p << t3.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0")
+
+@script:python depends on report@
+p << t3.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING comparing pointer to 0")
diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c
index 451c92d..2ef5e2e 100644
--- a/scripts/dtc/dtc.c
+++ b/scripts/dtc/dtc.c
@@ -101,7 +101,7 @@
 	const char *outform = "dts";
 	const char *outname = "-";
 	const char *depname = NULL;
-	int force = 0, check = 0, sort = 0;
+	int force = 0, sort = 0;
 	const char *arg;
 	int opt;
 	FILE *outf = NULL;
@@ -143,9 +143,6 @@
 		case 'f':
 			force = 1;
 			break;
-		case 'c':
-			check = 1;
-			break;
 		case 'q':
 			quiet++;
 			break;
diff --git a/scripts/dtc/flattree.c b/scripts/dtc/flattree.c
index ead0332..28d0b23 100644
--- a/scripts/dtc/flattree.c
+++ b/scripts/dtc/flattree.c
@@ -697,7 +697,6 @@
 {
 	struct reserve_info *reservelist = NULL;
 	struct reserve_info *new;
-	const char *p;
 	struct fdt_reserve_entry re;
 
 	/*
@@ -706,7 +705,6 @@
 	 *
 	 * First pass, count entries.
 	 */
-	p = inb->ptr;
 	while (1) {
 		flat_read_chunk(inb, &re, sizeof(re));
 		re.address  = fdt64_to_cpu(re.address);
diff --git a/scripts/headers_check.pl b/scripts/headers_check.pl
index 7957e7a..64ac238 100644
--- a/scripts/headers_check.pl
+++ b/scripts/headers_check.pl
@@ -19,6 +19,7 @@
 # 3) Check for leaked CONFIG_ symbols
 
 use strict;
+use File::Basename;
 
 my ($dir, $arch, @files) = @ARGV;
 
@@ -99,6 +100,39 @@
 }
 
 my $linux_types;
+my %import_stack = ();
+sub check_include_typesh
+{
+	my $path = $_[0];
+	my $import_path;
+
+	my $fh;
+	my @file_paths = ($path, $dir . "/" .  $path, dirname($filename) . "/" . $path);
+	for my $possible ( @file_paths ) {
+	    if (not $import_stack{$possible} and open($fh, '<', $possible)) {
+		$import_path = $possible;
+		$import_stack{$import_path} = 1;
+		last;
+	    }
+	}
+	if (eof $fh) {
+	    return;
+	}
+
+	my $line;
+	while ($line = <$fh>) {
+		if ($line =~ m/^\s*#\s*include\s+<linux\/types.h>/) {
+			$linux_types = 1;
+			last;
+		}
+		if (my $included = ($line =~ /^\s*#\s*include\s+[<"](\S+)[>"]/)[0]) {
+			check_include_typesh($included);
+		}
+	}
+	close $fh;
+	delete $import_stack{$import_path};
+}
+
 sub check_sizetypes
 {
 	if ($filename =~ /types.h|int-l64.h|int-ll64.h/o) {
@@ -113,6 +147,9 @@
 		$linux_types = 1;
 		return;
 	}
+	if (my $included = ($line =~ /^\s*#\s*include\s+[<"](\S+)[>"]/)[0]) {
+		check_include_typesh($included);
+	}
 	if ($line =~ m/__[us](8|16|32|64)\b/) {
 		printf STDERR "$filename:$lineno: " .
 		              "found __[us]{8,16,32,64} type " .
@@ -122,4 +159,3 @@
 		#$ret = 1;
 	}
 }
-
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 7c7a5a6..0586085 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -344,10 +344,8 @@
 
 int conf_read(const char *name)
 {
-	struct symbol *sym, *choice_sym;
-	struct property *prop;
-	struct expr *e;
-	int i, flags;
+	struct symbol *sym;
+	int i;
 
 	sym_set_change_count(0);
 
@@ -357,7 +355,7 @@
 	for_all_symbols(i, sym) {
 		sym_calc_value(sym);
 		if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO))
-			goto sym_ok;
+			continue;
 		if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) {
 			/* check that calculated value agrees with saved value */
 			switch (sym->type) {
@@ -366,30 +364,18 @@
 				if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym))
 					break;
 				if (!sym_is_choice(sym))
-					goto sym_ok;
+					continue;
 				/* fall through */
 			default:
 				if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val))
-					goto sym_ok;
+					continue;
 				break;
 			}
 		} else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE))
 			/* no previous value and not saved */
-			goto sym_ok;
+			continue;
 		conf_unsaved++;
 		/* maybe print value in verbose mode... */
-	sym_ok:
-		if (!sym_is_choice(sym))
-			continue;
-		/* The choice symbol only has a set value (and thus is not new)
-		 * if all its visible childs have values.
-		 */
-		prop = sym_get_choice_prop(sym);
-		flags = sym->flags;
-		expr_list_for_each_sym(prop->expr, e, choice_sym)
-			if (choice_sym->visible != no)
-				flags &= choice_sym->flags;
-		sym->flags &= flags | ~SYMBOL_DEF_USER;
 	}
 
 	for_all_symbols(i, sym) {
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
old mode 100644
new mode 100755
index ceadf0e..974d5cb
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -31,10 +31,12 @@
 	echo "  -h    display this help text"
 	echo "  -m    only merge the fragments, do not execute the make command"
 	echo "  -n    use allnoconfig instead of alldefconfig"
+	echo "  -r    list redundant entries when merging fragments"
 }
 
 MAKE=true
 ALLTARGET=alldefconfig
+WARNREDUN=false
 
 while true; do
 	case $1 in
@@ -52,18 +54,27 @@
 		usage
 		exit
 		;;
+	"-r")
+		WARNREDUN=true
+		shift
+		continue
+		;;
 	*)
 		break
 		;;
 	esac
 done
 
-
+INITFILE=$1
+shift;
 
 MERGE_LIST=$*
 SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p"
 TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
 
+echo "Using $INITFILE as base"
+cat $INITFILE > $TMP_FILE
+
 # Merge files, printing warnings on overridden values
 for MERGE_FILE in $MERGE_LIST ; do
 	echo "Merging $MERGE_FILE"
@@ -79,6 +90,8 @@
 			echo Previous  value: $PREV_VAL
 			echo New value:       $NEW_VAL
 			echo
+			elif [ "$WARNREDUN" = "true" ]; then
+			echo Value of $CFG is redundant by fragment $MERGE_FILE:
 			fi
 			sed -i "/$CFG[ =]/d" $TMP_FILE
 		fi
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 071f00c..22a3c40 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -262,11 +262,18 @@
 	struct symbol *def_sym;
 	struct property *prop;
 	struct expr *e;
+	int flags;
 
 	/* first calculate all choice values' visibilities */
+	flags = sym->flags;
 	prop = sym_get_choice_prop(sym);
-	expr_list_for_each_sym(prop->expr, e, def_sym)
+	expr_list_for_each_sym(prop->expr, e, def_sym) {
 		sym_calc_visibility(def_sym);
+		if (def_sym->visible != no)
+			flags &= def_sym->flags;
+	}
+
+	sym->flags &= flags | ~SYMBOL_DEF_USER;
 
 	/* is the user choice visible? */
 	def_sym = sym->def[S_DEF_USER].val;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 9adb667..c4e7d15 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -132,8 +132,10 @@
 	/* strip trailing .o */
 	s = strrchr(p, '.');
 	if (s != NULL)
-		if (strcmp(s, ".o") == 0)
+		if (strcmp(s, ".o") == 0) {
 			*s = '\0';
+			mod->is_dot_o = 1;
+		}
 
 	/* add to list */
 	mod->name = p;
@@ -587,7 +589,8 @@
 	unsigned int crc;
 	enum export export;
 
-	if (!is_vmlinux(mod->name) && strncmp(symname, "__ksymtab", 9) == 0)
+	if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
+	    strncmp(symname, "__ksymtab", 9) == 0)
 		export = export_from_secname(info, get_secindex(info, sym));
 	else
 		export = export_from_sec(info, get_secindex(info, sym));
@@ -849,7 +852,7 @@
 
 #define ALL_INIT_DATA_SECTIONS \
 	".init.setup$", ".init.rodata$", \
-	".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$" \
+	".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$", \
 	".init.data$", ".devinit.data$", ".cpuinit.data$", ".meminit.data$"
 #define ALL_EXIT_DATA_SECTIONS \
 	".exit.data$", ".devexit.data$", ".cpuexit.data$", ".memexit.data$"
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 2031119..51207e4 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -113,6 +113,7 @@
 	int has_cleanup;
 	struct buffer dev_table_buf;
 	char	     srcversion[25];
+	int is_dot_o;
 };
 
 struct elf_info {
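
The ALL_INIT_DATA_SECTIONS hunk above is a one-character fix with a real
effect: without the trailing comma, adjacent C string literals concatenate
and two section patterns silently collapse into one. Sketch:

	const char *bad[]  = { ".meminit.rodata$" ".init.data$", NULL };
	/* one entry, ".meminit.rodata$.init.data$": neither pattern matches */

	const char *good[] = { ".meminit.rodata$", ".init.data$", NULL };
	/* two entries, as intended */
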
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 3c6c0b1..eee5f8e 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -97,6 +97,7 @@
 mkdir -p "$libc_headers_dir/usr/share/doc/$libc_headers_packagename"
 mkdir -m 755 -p "$kernel_headers_dir/DEBIAN"
 mkdir -p "$kernel_headers_dir/usr/share/doc/$kernel_headers_packagename"
+mkdir -p "$kernel_headers_dir/lib/modules/$version/"
 if [ "$ARCH" = "um" ] ; then
 	mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin"
 fi
@@ -120,15 +121,19 @@
 fi
 
 if grep -q '^CONFIG_MODULES=y' .config ; then
-	INSTALL_MOD_PATH="$tmpdir" make KBUILD_SRC= modules_install
+	INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_install
+	rm -f "$tmpdir/lib/modules/$version/build"
+	rm -f "$tmpdir/lib/modules/$version/source"
 	if [ "$ARCH" = "um" ] ; then
 		mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/"
 		rmdir "$tmpdir/lib/modules/$version"
 	fi
 fi
 
-make headers_check
-make headers_install INSTALL_HDR_PATH="$libc_headers_dir/usr"
+if [ "$ARCH" != "um" ]; then
+	$MAKE headers_check KBUILD_SRC=
+	$MAKE headers_install KBUILD_SRC= INSTALL_HDR_PATH="$libc_headers_dir/usr"
+fi
 
 # Install the maintainer scripts
 # Note: hook scripts under /etc/kernel are also executed by official Debian
@@ -245,6 +250,7 @@
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
 (cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
+ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
 rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
 arch=$(dpkg --print-architecture)
 
@@ -259,8 +265,6 @@
  This is useful for people who need to build external modules
 EOF
 
-create_package "$kernel_headers_packagename" "$kernel_headers_dir"
-
 # Do we have firmware? Move it out of the way and build it into a package.
 if [ -e "$tmpdir/lib/firmware" ]; then
 	mv "$tmpdir/lib/firmware" "$fwdir/lib/"
@@ -287,7 +291,11 @@
  are used by the installed headers for GNU glibc and other system libraries.
 EOF
 
-create_package "$libc_headers_packagename" "$libc_headers_dir"
+if [ "$ARCH" != "um" ]; then
+	create_package "$kernel_headers_packagename" "$kernel_headers_dir"
+	create_package "$libc_headers_packagename" "$libc_headers_dir"
+fi
+
 create_package "$packagename" "$tmpdir"
 
 exit 0
diff --git a/scripts/patch-kernel b/scripts/patch-kernel
index 20fb25c..d000ea3 100755
--- a/scripts/patch-kernel
+++ b/scripts/patch-kernel
@@ -116,6 +116,10 @@
 		ext=".bz2"
 		name="bzip2"
 		uncomp="bunzip2 -dc"
+  elif [ -r ${filebase}.xz ]; then
+                ext=".xz"
+                name="xz"
+                uncomp="xz -dc"
   elif [ -r ${filebase}.zip ]; then
 		ext=".zip"
 		name="zip"
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 4d40384..bd6dca8 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -75,8 +75,7 @@
 		[ -w . ] && git update-index --refresh --unmerged > /dev/null
 
 		# Check for uncommitted changes
-		if git diff-index --name-only HEAD | grep -v "^scripts/package" \
-		    | read dummy; then
+		if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
 			printf '%s' -dirty
 		fi
 
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 833813a..cf7b12f 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -116,7 +116,7 @@
 
 dogtags()
 {
-	all_sources | gtags -f -
+	all_sources | gtags -i -f -
 }
 
 exuberant()
@@ -166,9 +166,6 @@
 	all_defconfigs | xargs -r $1 -a                         \
 	--langdef=dotconfig --language-force=dotconfig          \
 	--regex-dotconfig='/^#?[[:blank:]]*(CONFIG_[[:alnum:]_]+)/\1/'
-
-	# Remove structure forward declarations.
-	LANG=C sed -i -e '/^\([a-zA-Z_][a-zA-Z0-9_]*\)\t.*\t\/\^struct \1;.*\$\/;"\tx$/d' tags
 }
 
 emacs()
@@ -233,6 +230,7 @@
 	fi
 fi
 
+remove_structs=
 case "$1" in
 	"cscope")
 		docscope
@@ -245,10 +243,17 @@
 	"tags")
 		rm -f tags
 		xtags ctags
+		remove_structs=y
 		;;
 
 	"TAGS")
 		rm -f TAGS
 		xtags etags
+		remove_structs=y
 		;;
 esac
+
+# Remove structure forward declarations.
+if [ -n "$remove_structs" ]; then
+    LANG=C sed -i -e '/^\([a-zA-Z_][a-zA-Z0-9_]*\)\t.*\t\/\^struct \1;.*\$\/;"\tx$/d' $1
+fi
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 5ff6777..cc3520d 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -115,23 +115,23 @@
 
 	if (aa_g_audit_header) {
 		audit_log_format(ab, "apparmor=");
-		audit_log_string(ab, aa_audit_type[sa->aad.type]);
+		audit_log_string(ab, aa_audit_type[sa->aad->type]);
 	}
 
-	if (sa->aad.op) {
+	if (sa->aad->op) {
 		audit_log_format(ab, " operation=");
-		audit_log_string(ab, op_table[sa->aad.op]);
+		audit_log_string(ab, op_table[sa->aad->op]);
 	}
 
-	if (sa->aad.info) {
+	if (sa->aad->info) {
 		audit_log_format(ab, " info=");
-		audit_log_string(ab, sa->aad.info);
-		if (sa->aad.error)
-			audit_log_format(ab, " error=%d", sa->aad.error);
+		audit_log_string(ab, sa->aad->info);
+		if (sa->aad->error)
+			audit_log_format(ab, " error=%d", sa->aad->error);
 	}
 
-	if (sa->aad.profile) {
-		struct aa_profile *profile = sa->aad.profile;
+	if (sa->aad->profile) {
+		struct aa_profile *profile = sa->aad->profile;
 		pid_t pid;
 		rcu_read_lock();
 		pid = rcu_dereference(tsk->real_parent)->pid;
@@ -145,9 +145,9 @@
 		audit_log_untrustedstring(ab, profile->base.hname);
 	}
 
-	if (sa->aad.name) {
+	if (sa->aad->name) {
 		audit_log_format(ab, " name=");
-		audit_log_untrustedstring(ab, sa->aad.name);
+		audit_log_untrustedstring(ab, sa->aad->name);
 	}
 }
 
@@ -159,10 +159,8 @@
 void aa_audit_msg(int type, struct common_audit_data *sa,
 		  void (*cb) (struct audit_buffer *, void *))
 {
-	sa->aad.type = type;
-	sa->lsm_pre_audit = audit_pre;
-	sa->lsm_post_audit = cb;
-	common_lsm_audit(sa);
+	sa->aad->type = type;
+	common_lsm_audit(sa, audit_pre, cb);
 }
 
 /**
@@ -184,7 +182,7 @@
 	BUG_ON(!profile);
 
 	if (type == AUDIT_APPARMOR_AUTO) {
-		if (likely(!sa->aad.error)) {
+		if (likely(!sa->aad->error)) {
 			if (AUDIT_MODE(profile) != AUDIT_ALL)
 				return 0;
 			type = AUDIT_APPARMOR_AUDIT;
@@ -196,21 +194,21 @@
 	if (AUDIT_MODE(profile) == AUDIT_QUIET ||
 	    (type == AUDIT_APPARMOR_DENIED &&
 	     AUDIT_MODE(profile) == AUDIT_QUIET))
-		return sa->aad.error;
+		return sa->aad->error;
 
 	if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
 		type = AUDIT_APPARMOR_KILL;
 
 	if (!unconfined(profile))
-		sa->aad.profile = profile;
+		sa->aad->profile = profile;
 
 	aa_audit_msg(type, sa, cb);
 
-	if (sa->aad.type == AUDIT_APPARMOR_KILL)
+	if (sa->aad->type == AUDIT_APPARMOR_KILL)
 		(void)send_sig_info(SIGKILL, NULL, sa->tsk ? sa->tsk : current);
 
-	if (sa->aad.type == AUDIT_APPARMOR_ALLOWED)
-		return complain_error(sa->aad.error);
+	if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
+		return complain_error(sa->aad->error);
 
-	return sa->aad.error;
+	return sa->aad->error;
 }
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 9982c48..088dba3 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -64,11 +64,13 @@
 	struct audit_cache *ent;
 	int type = AUDIT_APPARMOR_AUTO;
 	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
 	COMMON_AUDIT_DATA_INIT(&sa, CAP);
+	sa.aad = &aad;
 	sa.tsk = task;
 	sa.u.cap = cap;
-	sa.aad.op = OP_CAPABLE;
-	sa.aad.error = error;
+	sa.aad->op = OP_CAPABLE;
+	sa.aad->error = error;
 
 	if (likely(!error)) {
 		/* test if auditing is being forced */
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 5d176f2..2f8fcba 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -67,22 +67,22 @@
 	struct common_audit_data *sa = va;
 	uid_t fsuid = current_fsuid();
 
-	if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
+	if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
 		audit_log_format(ab, " requested_mask=");
-		audit_file_mask(ab, sa->aad.fs.request);
+		audit_file_mask(ab, sa->aad->fs.request);
 	}
-	if (sa->aad.fs.denied & AA_AUDIT_FILE_MASK) {
+	if (sa->aad->fs.denied & AA_AUDIT_FILE_MASK) {
 		audit_log_format(ab, " denied_mask=");
-		audit_file_mask(ab, sa->aad.fs.denied);
+		audit_file_mask(ab, sa->aad->fs.denied);
 	}
-	if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
+	if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
 		audit_log_format(ab, " fsuid=%d", fsuid);
-		audit_log_format(ab, " ouid=%d", sa->aad.fs.ouid);
+		audit_log_format(ab, " ouid=%d", sa->aad->fs.ouid);
 	}
 
-	if (sa->aad.fs.target) {
+	if (sa->aad->fs.target) {
 		audit_log_format(ab, " target=");
-		audit_log_untrustedstring(ab, sa->aad.fs.target);
+		audit_log_untrustedstring(ab, sa->aad->fs.target);
 	}
 }
 
@@ -107,45 +107,47 @@
 {
 	int type = AUDIT_APPARMOR_AUTO;
 	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
 	COMMON_AUDIT_DATA_INIT(&sa, NONE);
-	sa.aad.op = op,
-	sa.aad.fs.request = request;
-	sa.aad.name = name;
-	sa.aad.fs.target = target;
-	sa.aad.fs.ouid = ouid;
-	sa.aad.info = info;
-	sa.aad.error = error;
+	sa.aad = &aad;
+	aad.op = op,
+	aad.fs.request = request;
+	aad.name = name;
+	aad.fs.target = target;
+	aad.fs.ouid = ouid;
+	aad.info = info;
+	aad.error = error;
 
-	if (likely(!sa.aad.error)) {
+	if (likely(!sa.aad->error)) {
 		u32 mask = perms->audit;
 
 		if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
 			mask = 0xffff;
 
 		/* mask off perms that are not being force audited */
-		sa.aad.fs.request &= mask;
+		sa.aad->fs.request &= mask;
 
-		if (likely(!sa.aad.fs.request))
+		if (likely(!sa.aad->fs.request))
 			return 0;
 		type = AUDIT_APPARMOR_AUDIT;
 	} else {
 		/* only report permissions that were denied */
-		sa.aad.fs.request = sa.aad.fs.request & ~perms->allow;
+		sa.aad->fs.request = sa.aad->fs.request & ~perms->allow;
 
-		if (sa.aad.fs.request & perms->kill)
+		if (sa.aad->fs.request & perms->kill)
 			type = AUDIT_APPARMOR_KILL;
 
 		/* quiet known rejects, assumes quiet and kill do not overlap */
-		if ((sa.aad.fs.request & perms->quiet) &&
+		if ((sa.aad->fs.request & perms->quiet) &&
 		    AUDIT_MODE(profile) != AUDIT_NOQUIET &&
 		    AUDIT_MODE(profile) != AUDIT_ALL)
-			sa.aad.fs.request &= ~perms->quiet;
+			sa.aad->fs.request &= ~perms->quiet;
 
-		if (!sa.aad.fs.request)
-			return COMPLAIN_MODE(profile) ? 0 : sa.aad.error;
+		if (!sa.aad->fs.request)
+			return COMPLAIN_MODE(profile) ? 0 : sa.aad->error;
 	}
 
-	sa.aad.fs.denied = sa.aad.fs.request & ~perms->allow;
+	sa.aad->fs.denied = sa.aad->fs.request & ~perms->allow;
 	return aa_audit(type, profile, gfp, &sa, file_audit_cb);
 }
 
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index 4ba78c2..3868b1e 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -103,7 +103,33 @@
 };
 
 
-/* define a short hand for apparmor_audit_data portion of common_audit_data */
+struct apparmor_audit_data {
+	int error;
+	int op;
+	int type;
+	void *profile;
+	const char *name;
+	const char *info;
+	union {
+		void *target;
+		struct {
+			long pos;
+			void *target;
+		} iface;
+		struct {
+			int rlim;
+			unsigned long max;
+		} rlim;
+		struct {
+			const char *target;
+			u32 request;
+			u32 denied;
+			uid_t ouid;
+		} fs;
+	};
+};
+
+/* define a short hand for apparmor_audit_data structure */
 #define aad apparmor_audit_data
 
 void aa_audit_msg(int type, struct common_audit_data *sa,
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ee05c6..c3da93a 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -26,7 +26,7 @@
 {
 	struct common_audit_data *sa = va;
 	audit_log_format(ab, " target=");
-	audit_log_untrustedstring(ab, sa->aad.target);
+	audit_log_untrustedstring(ab, sa->aad->target);
 }
 
 /**
@@ -41,10 +41,12 @@
 			   struct aa_profile *target, int error)
 {
 	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
 	COMMON_AUDIT_DATA_INIT(&sa, NONE);
-	sa.aad.op = OP_PTRACE;
-	sa.aad.target = target;
-	sa.aad.error = error;
+	sa.aad = &aad;
+	aad.op = OP_PTRACE;
+	aad.target = target;
+	aad.error = error;
 
 	return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_ATOMIC, &sa,
 			audit_cb);
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 9516948..e75829b 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -65,8 +65,10 @@
 {
 	if (audit_enabled) {
 		struct common_audit_data sa;
+		struct apparmor_audit_data aad = {0,};
 		COMMON_AUDIT_DATA_INIT(&sa, NONE);
-		sa.aad.info = str;
+		sa.aad = &aad;
+		aad.info = str;
 		aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
 	}
 	printk(KERN_INFO "AppArmor: %s\n", str);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 97ce8fa..ad05d39 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -588,10 +588,12 @@
 			error = aa_setprocattr_permipc(args);
 		} else {
 			struct common_audit_data sa;
+			struct apparmor_audit_data aad = {0,};
 			COMMON_AUDIT_DATA_INIT(&sa, NONE);
-			sa.aad.op = OP_SETPROCATTR;
-			sa.aad.info = name;
-			sa.aad.error = -EINVAL;
+			sa.aad = &aad;
+			aad.op = OP_SETPROCATTR;
+			aad.info = name;
+			aad.error = -EINVAL;
 			return aa_audit(AUDIT_APPARMOR_DENIED,
 					__aa_current_profile(), GFP_KERNEL,
 					&sa, NULL);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 9064143..f1f7506 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -964,11 +964,13 @@
 			int error)
 {
 	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
 	COMMON_AUDIT_DATA_INIT(&sa, NONE);
-	sa.aad.op = op;
-	sa.aad.name = name;
-	sa.aad.info = info;
-	sa.aad.error = error;
+	sa.aad = &aad;
+	aad.op = op;
+	aad.name = name;
+	aad.info = info;
+	aad.error = error;
 
 	return aa_audit(AUDIT_APPARMOR_STATUS, __aa_current_profile(), gfp,
 			&sa, NULL);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 25fd51e..deab7c7 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -70,13 +70,13 @@
 static void audit_cb(struct audit_buffer *ab, void *va)
 {
 	struct common_audit_data *sa = va;
-	if (sa->aad.iface.target) {
-		struct aa_profile *name = sa->aad.iface.target;
+	if (sa->aad->iface.target) {
+		struct aa_profile *name = sa->aad->iface.target;
 		audit_log_format(ab, " name=");
 		audit_log_untrustedstring(ab, name->base.hname);
 	}
-	if (sa->aad.iface.pos)
-		audit_log_format(ab, " offset=%ld", sa->aad.iface.pos);
+	if (sa->aad->iface.pos)
+		audit_log_format(ab, " offset=%ld", sa->aad->iface.pos);
 }
 
 /**
@@ -94,13 +94,15 @@
 {
 	struct aa_profile *profile = __aa_current_profile();
 	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
 	COMMON_AUDIT_DATA_INIT(&sa, NONE);
+	sa.aad = &aad;
 	if (e)
-		sa.aad.iface.pos = e->pos - e->start;
-	sa.aad.iface.target = new;
-	sa.aad.name = name;
-	sa.aad.info = info;
-	sa.aad.error = error;
+		aad.iface.pos = e->pos - e->start;
+	aad.iface.target = new;
+	aad.name = name;
+	aad.info = info;
+	aad.error = error;
 
 	return aa_audit(AUDIT_APPARMOR_STATUS, profile, GFP_KERNEL, &sa,
 			audit_cb);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 72c25a4f..2fe8613 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -34,7 +34,7 @@
 	struct common_audit_data *sa = va;
 
 	audit_log_format(ab, " rlimit=%s value=%lu",
-			 rlim_names[sa->aad.rlim.rlim], sa->aad.rlim.max);
+			 rlim_names[sa->aad->rlim.rlim], sa->aad->rlim.max);
 }
 
 /**
@@ -50,12 +50,14 @@
 			  unsigned long value, int error)
 {
 	struct common_audit_data sa;
+	struct apparmor_audit_data aad = {0,};
 
 	COMMON_AUDIT_DATA_INIT(&sa, NONE);
-	sa.aad.op = OP_SETRLIMIT,
-	sa.aad.rlim.rlim = resource;
-	sa.aad.rlim.max = value;
-	sa.aad.error = error;
+	sa.aad = &aad;
+	aad.op = OP_SETRLIMIT,
+	aad.rlim.rlim = resource;
+	aad.rlim.max = value;
+	aad.error = error;
 	return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_KERNEL, &sa,
 			audit_cb);
 }
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 8b8f090..90c129b 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -49,8 +49,8 @@
 	if (ih == NULL)
 		return -EINVAL;
 
-	ad->u.net.v4info.saddr = ih->saddr;
-	ad->u.net.v4info.daddr = ih->daddr;
+	ad->u.net->v4info.saddr = ih->saddr;
+	ad->u.net->v4info.daddr = ih->daddr;
 
 	if (proto)
 		*proto = ih->protocol;
@@ -64,8 +64,8 @@
 		if (th == NULL)
 			break;
 
-		ad->u.net.sport = th->source;
-		ad->u.net.dport = th->dest;
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
 		break;
 	}
 	case IPPROTO_UDP: {
@@ -73,8 +73,8 @@
 		if (uh == NULL)
 			break;
 
-		ad->u.net.sport = uh->source;
-		ad->u.net.dport = uh->dest;
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
 		break;
 	}
 	case IPPROTO_DCCP: {
@@ -82,16 +82,16 @@
 		if (dh == NULL)
 			break;
 
-		ad->u.net.sport = dh->dccph_sport;
-		ad->u.net.dport = dh->dccph_dport;
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
 		break;
 	}
 	case IPPROTO_SCTP: {
 		struct sctphdr *sh = sctp_hdr(skb);
 		if (sh == NULL)
 			break;
-		ad->u.net.sport = sh->source;
-		ad->u.net.dport = sh->dest;
+		ad->u.net->sport = sh->source;
+		ad->u.net->dport = sh->dest;
 		break;
 	}
 	default:
@@ -119,8 +119,8 @@
 	ip6 = ipv6_hdr(skb);
 	if (ip6 == NULL)
 		return -EINVAL;
-	ad->u.net.v6info.saddr = ip6->saddr;
-	ad->u.net.v6info.daddr = ip6->daddr;
+	ad->u.net->v6info.saddr = ip6->saddr;
+	ad->u.net->v6info.daddr = ip6->daddr;
 	ret = 0;
 	/* IPv6 can have several extension header before the Transport header
 	 * skip them */
@@ -140,8 +140,8 @@
 		if (th == NULL)
 			break;
 
-		ad->u.net.sport = th->source;
-		ad->u.net.dport = th->dest;
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
 		break;
 	}
 	case IPPROTO_UDP: {
@@ -151,8 +151,8 @@
 		if (uh == NULL)
 			break;
 
-		ad->u.net.sport = uh->source;
-		ad->u.net.dport = uh->dest;
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
 		break;
 	}
 	case IPPROTO_DCCP: {
@@ -162,8 +162,8 @@
 		if (dh == NULL)
 			break;
 
-		ad->u.net.sport = dh->dccph_sport;
-		ad->u.net.dport = dh->dccph_dport;
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
 		break;
 	}
 	case IPPROTO_SCTP: {
@@ -172,8 +172,8 @@
 		sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
 		if (sh == NULL)
 			break;
-		ad->u.net.sport = sh->source;
-		ad->u.net.dport = sh->dest;
+		ad->u.net->sport = sh->source;
+		ad->u.net->dport = sh->dest;
 		break;
 	}
 	default:
@@ -281,8 +281,8 @@
 		}
 		break;
 	case LSM_AUDIT_DATA_NET:
-		if (a->u.net.sk) {
-			struct sock *sk = a->u.net.sk;
+		if (a->u.net->sk) {
+			struct sock *sk = a->u.net->sk;
 			struct unix_sock *u;
 			int len = 0;
 			char *p = NULL;
@@ -330,29 +330,29 @@
 			}
 		}
 
-		switch (a->u.net.family) {
+		switch (a->u.net->family) {
 		case AF_INET:
-			print_ipv4_addr(ab, a->u.net.v4info.saddr,
-					a->u.net.sport,
+			print_ipv4_addr(ab, a->u.net->v4info.saddr,
+					a->u.net->sport,
 					"saddr", "src");
-			print_ipv4_addr(ab, a->u.net.v4info.daddr,
-					a->u.net.dport,
+			print_ipv4_addr(ab, a->u.net->v4info.daddr,
+					a->u.net->dport,
 					"daddr", "dest");
 			break;
 		case AF_INET6:
-			print_ipv6_addr(ab, &a->u.net.v6info.saddr,
-					a->u.net.sport,
+			print_ipv6_addr(ab, &a->u.net->v6info.saddr,
+					a->u.net->sport,
 					"saddr", "src");
-			print_ipv6_addr(ab, &a->u.net.v6info.daddr,
-					a->u.net.dport,
+			print_ipv6_addr(ab, &a->u.net->v6info.daddr,
+					a->u.net->dport,
 					"daddr", "dest");
 			break;
 		}
-		if (a->u.net.netif > 0) {
+		if (a->u.net->netif > 0) {
 			struct net_device *dev;
 
 			/* NOTE: we always use init's namespace */
-			dev = dev_get_by_index(&init_net, a->u.net.netif);
+			dev = dev_get_by_index(&init_net, a->u.net->netif);
 			if (dev) {
 				audit_log_format(ab, " netif=%s", dev->name);
 				dev_put(dev);
@@ -378,11 +378,15 @@
 /**
  * common_lsm_audit - generic LSM auditing function
  * @a:  auxiliary audit data
+ * @pre_audit: lsm-specific pre-audit callback
+ * @post_audit: lsm-specific post-audit callback
  *
  * setup the audit buffer for common security information
  * uses callback to print LSM specific information
  */
-void common_lsm_audit(struct common_audit_data *a)
+void common_lsm_audit(struct common_audit_data *a,
+	void (*pre_audit)(struct audit_buffer *, void *),
+	void (*post_audit)(struct audit_buffer *, void *))
 {
 	struct audit_buffer *ab;
 
@@ -394,13 +398,13 @@
 	if (ab == NULL)
 		return;
 
-	if (a->lsm_pre_audit)
-		a->lsm_pre_audit(ab, a);
+	if (pre_audit)
+		pre_audit(ab, a);
 
 	dump_common_audit_data(ab, a);
 
-	if (a->lsm_post_audit)
-		a->lsm_post_audit(ab, a);
+	if (post_audit)
+		post_audit(ab, a);
 
 	audit_log_end(ab);
 }
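
The lsm_audit.c hunks above drop the lsm_pre_audit/lsm_post_audit pointers from struct common_audit_data and have callers pass the callbacks to common_lsm_audit() directly, with the LSM-specific audit data living in the caller's stack frame. A minimal sketch of the resulting calling convention, modeled on the Smack hunks later in this diff (my_audit_cb and my_log are illustrative names, not part of the series):

static void my_audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *ad = va;

	/* emit whatever LSM-specific fields are reachable through ad */
	audit_log_format(ab, " type=%d", ad->type);
}

static void my_log(struct common_audit_data *ad)
{
	/* callbacks are now arguments instead of common_audit_data members */
	common_lsm_audit(ad, my_audit_cb, /* post_audit */ NULL);
}
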
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 6989472..8ee42b2 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -436,9 +436,9 @@
 {
 	struct common_audit_data *ad = a;
 	audit_log_format(ab, "avc:  %s ",
-			 ad->selinux_audit_data.denied ? "denied" : "granted");
-	avc_dump_av(ab, ad->selinux_audit_data.tclass,
-			ad->selinux_audit_data.audited);
+			 ad->selinux_audit_data->slad->denied ? "denied" : "granted");
+	avc_dump_av(ab, ad->selinux_audit_data->slad->tclass,
+			ad->selinux_audit_data->slad->audited);
 	audit_log_format(ab, " for ");
 }
 
@@ -452,22 +452,25 @@
 {
 	struct common_audit_data *ad = a;
 	audit_log_format(ab, " ");
-	avc_dump_query(ab, ad->selinux_audit_data.ssid,
-			   ad->selinux_audit_data.tsid,
-			   ad->selinux_audit_data.tclass);
+	avc_dump_query(ab, ad->selinux_audit_data->slad->ssid,
+			   ad->selinux_audit_data->slad->tsid,
+			   ad->selinux_audit_data->slad->tclass);
 }
 
 /* This is the slow part of avc audit with big stack footprint */
 static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
 		u32 requested, u32 audited, u32 denied,
-		struct av_decision *avd, struct common_audit_data *a,
+		struct common_audit_data *a,
 		unsigned flags)
 {
 	struct common_audit_data stack_data;
+	struct selinux_audit_data sad = {0,};
+	struct selinux_late_audit_data slad;
 
 	if (!a) {
 		a = &stack_data;
 		COMMON_AUDIT_DATA_INIT(a, NONE);
+		a->selinux_audit_data = &sad;
 	}
 
 	/*
@@ -481,15 +484,15 @@
 	    (flags & MAY_NOT_BLOCK))
 		return -ECHILD;
 
-	a->selinux_audit_data.tclass = tclass;
-	a->selinux_audit_data.requested = requested;
-	a->selinux_audit_data.ssid = ssid;
-	a->selinux_audit_data.tsid = tsid;
-	a->selinux_audit_data.audited = audited;
-	a->selinux_audit_data.denied = denied;
-	a->lsm_pre_audit = avc_audit_pre_callback;
-	a->lsm_post_audit = avc_audit_post_callback;
-	common_lsm_audit(a);
+	slad.tclass = tclass;
+	slad.requested = requested;
+	slad.ssid = ssid;
+	slad.tsid = tsid;
+	slad.audited = audited;
+	slad.denied = denied;
+
+	a->selinux_audit_data->slad = &slad;
+	common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
 	return 0;
 }
 
@@ -513,7 +516,7 @@
  * be performed under a lock, to allow the lock to be released
  * before calling the auditing code.
  */
-int avc_audit(u32 ssid, u32 tsid,
+inline int avc_audit(u32 ssid, u32 tsid,
 	       u16 tclass, u32 requested,
 	       struct av_decision *avd, int result, struct common_audit_data *a,
 	       unsigned flags)
@@ -523,7 +526,7 @@
 	if (unlikely(denied)) {
 		audited = denied & avd->auditdeny;
 		/*
-		 * a->selinux_audit_data.auditdeny is TRICKY!  Setting a bit in
+		 * a->selinux_audit_data->auditdeny is TRICKY!  Setting a bit in
 		 * this field means that ANY denials should NOT be audited if
 		 * the policy contains an explicit dontaudit rule for that
 		 * permission.  Take notice that this is unrelated to the
@@ -532,15 +535,15 @@
 		 *
 		 * denied == READ
 		 * avd.auditdeny & ACCESS == 0 (not set means explicit rule)
-		 * selinux_audit_data.auditdeny & ACCESS == 1
+		 * selinux_audit_data->auditdeny & ACCESS == 1
 		 *
 		 * We will NOT audit the denial even though the denied
 		 * permission was READ and the auditdeny checks were for
 		 * ACCESS
 		 */
 		if (a &&
-		    a->selinux_audit_data.auditdeny &&
-		    !(a->selinux_audit_data.auditdeny & avd->auditdeny))
+		    a->selinux_audit_data->auditdeny &&
+		    !(a->selinux_audit_data->auditdeny & avd->auditdeny))
 			audited = 0;
 	} else if (result)
 		audited = denied = requested;
@@ -551,7 +554,7 @@
 
 	return slow_avc_audit(ssid, tsid, tclass,
 		requested, audited, denied,
-		avd, a, flags);
+		a, flags);
 }
 
 /**
@@ -741,6 +744,41 @@
 	return rc;
 }
 
+/*
+ * Slow-path helper function for avc_has_perm_noaudit,
+ * when the avc_node lookup fails. We get called with
+ * the RCU read lock held, and need to return with it
+ * still held, but drop it for the security compute.
+ *
+ * Don't inline this, since it's the slow-path and just
+ * results in a bigger stack frame.
+ */
+static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
+			 u16 tclass, struct av_decision *avd)
+{
+	rcu_read_unlock();
+	security_compute_av(ssid, tsid, tclass, avd);
+	rcu_read_lock();
+	return avc_insert(ssid, tsid, tclass, avd);
+}
+
+static noinline int avc_denied(u32 ssid, u32 tsid,
+			 u16 tclass, u32 requested,
+			 unsigned flags,
+			 struct av_decision *avd)
+{
+	if (flags & AVC_STRICT)
+		return -EACCES;
+
+	if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
+		return -EACCES;
+
+	avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
+				tsid, tclass, avd->seqno);
+	return 0;
+}
+
+
 /**
  * avc_has_perm_noaudit - Check permissions but perform no auditing.
  * @ssid: source security identifier
@@ -761,7 +799,7 @@
  * auditing, e.g. in cases where a lock must be held for the check but
  * should be released for the auditing.
  */
-int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 			 u16 tclass, u32 requested,
 			 unsigned flags,
 			 struct av_decision *avd)
@@ -776,26 +814,15 @@
 
 	node = avc_lookup(ssid, tsid, tclass);
 	if (unlikely(!node)) {
-		rcu_read_unlock();
-		security_compute_av(ssid, tsid, tclass, avd);
-		rcu_read_lock();
-		node = avc_insert(ssid, tsid, tclass, avd);
+		node = avc_compute_av(ssid, tsid, tclass, avd);
 	} else {
 		memcpy(avd, &node->ae.avd, sizeof(*avd));
 		avd = &node->ae.avd;
 	}
 
 	denied = requested & ~(avd->allowed);
-
-	if (denied) {
-		if (flags & AVC_STRICT)
-			rc = -EACCES;
-		else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE))
-			avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
-					tsid, tclass, avd->seqno);
-		else
-			rc = -EACCES;
-	}
+	if (unlikely(denied))
+		rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
 
 	rcu_read_unlock();
 	return rc;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 15c6c56..d85b793 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -28,7 +28,6 @@
 #include <linux/kernel.h>
 #include <linux/tracehook.h>
 #include <linux/errno.h>
-#include <linux/ext2_fs.h>
 #include <linux/sched.h>
 #include <linux/security.h>
 #include <linux/xattr.h>
@@ -1421,6 +1420,7 @@
 			       int cap, int audit)
 {
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	struct av_decision avd;
 	u16 sclass;
 	u32 sid = cred_sid(cred);
@@ -1428,6 +1428,7 @@
 	int rc;
 
 	COMMON_AUDIT_DATA_INIT(&ad, CAP);
+	ad.selinux_audit_data = &sad;
 	ad.tsk = current;
 	ad.u.cap = cap;
 
@@ -1493,9 +1494,11 @@
 				unsigned flags)
 {
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 
 	COMMON_AUDIT_DATA_INIT(&ad, INODE);
 	ad.u.inode = inode;
+	ad.selinux_audit_data = &sad;
 	return inode_has_perm(cred, inode, perms, &ad, flags);
 }
 
@@ -1508,9 +1511,11 @@
 {
 	struct inode *inode = dentry->d_inode;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 
 	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
 	ad.u.dentry = dentry;
+	ad.selinux_audit_data = &sad;
 	return inode_has_perm(cred, inode, av, &ad, 0);
 }
 
@@ -1523,9 +1528,11 @@
 {
 	struct inode *inode = path->dentry->d_inode;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 
 	COMMON_AUDIT_DATA_INIT(&ad, PATH);
 	ad.u.path = *path;
+	ad.selinux_audit_data = &sad;
 	return inode_has_perm(cred, inode, av, &ad, 0);
 }
 
@@ -1544,11 +1551,13 @@
 	struct file_security_struct *fsec = file->f_security;
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = cred_sid(cred);
 	int rc;
 
 	COMMON_AUDIT_DATA_INIT(&ad, PATH);
 	ad.u.path = file->f_path;
+	ad.selinux_audit_data = &sad;
 
 	if (sid != fsec->sid) {
 		rc = avc_has_perm(sid, fsec->sid,
@@ -1578,6 +1587,7 @@
 	struct superblock_security_struct *sbsec;
 	u32 sid, newsid;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	int rc;
 
 	dsec = dir->i_security;
@@ -1588,6 +1598,7 @@
 
 	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
 	ad.u.dentry = dentry;
+	ad.selinux_audit_data = &sad;
 
 	rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
 			  DIR__ADD_NAME | DIR__SEARCH,
@@ -1632,6 +1643,7 @@
 {
 	struct inode_security_struct *dsec, *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 	u32 av;
 	int rc;
@@ -1641,6 +1653,7 @@
 
 	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
 	ad.u.dentry = dentry;
+	ad.selinux_audit_data = &sad;
 
 	av = DIR__SEARCH;
 	av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
@@ -1675,6 +1688,7 @@
 {
 	struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 	u32 av;
 	int old_is_dir, new_is_dir;
@@ -1686,6 +1700,7 @@
 	new_dsec = new_dir->i_security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+	ad.selinux_audit_data = &sad;
 
 	ad.u.dentry = old_dentry;
 	rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
@@ -1971,6 +1986,7 @@
 	struct task_security_struct *new_tsec;
 	struct inode_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	struct inode *inode = bprm->file->f_path.dentry->d_inode;
 	int rc;
 
@@ -2010,6 +2026,7 @@
 	}
 
 	COMMON_AUDIT_DATA_INIT(&ad, PATH);
+	ad.selinux_audit_data = &sad;
 	ad.u.path = bprm->file->f_path;
 
 	if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
@@ -2099,6 +2116,7 @@
 					    struct files_struct *files)
 {
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	struct file *file, *devnull = NULL;
 	struct tty_struct *tty;
 	struct fdtable *fdt;
@@ -2136,6 +2154,7 @@
 	/* Revalidate access to inherited open files. */
 
 	COMMON_AUDIT_DATA_INIT(&ad, INODE);
+	ad.selinux_audit_data = &sad;
 
 	spin_lock(&files->file_lock);
 	for (;;) {
@@ -2473,6 +2492,7 @@
 {
 	const struct cred *cred = current_cred();
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	int rc;
 
 	rc = superblock_doinit(sb, data);
@@ -2484,6 +2504,7 @@
 		return 0;
 
 	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+	ad.selinux_audit_data = &sad;
 	ad.u.dentry = sb->s_root;
 	return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
 }
@@ -2492,8 +2513,10 @@
 {
 	const struct cred *cred = current_cred();
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 
 	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+	ad.selinux_audit_data = &sad;
 	ad.u.dentry = dentry->d_sb->s_root;
 	return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
 }
@@ -2657,6 +2680,7 @@
 {
 	const struct cred *cred = current_cred();
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 perms;
 	bool from_access;
 	unsigned flags = mask & MAY_NOT_BLOCK;
@@ -2669,10 +2693,11 @@
 		return 0;
 
 	COMMON_AUDIT_DATA_INIT(&ad, INODE);
+	ad.selinux_audit_data = &sad;
 	ad.u.inode = inode;
 
 	if (from_access)
-		ad.selinux_audit_data.auditdeny |= FILE__AUDIT_ACCESS;
+		ad.selinux_audit_data->auditdeny |= FILE__AUDIT_ACCESS;
 
 	perms = file_mask_to_av(inode->i_mode, mask);
 
@@ -2738,6 +2763,7 @@
 	struct inode_security_struct *isec = inode->i_security;
 	struct superblock_security_struct *sbsec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 newsid, sid = current_sid();
 	int rc = 0;
 
@@ -2752,6 +2778,7 @@
 		return -EPERM;
 
 	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+	ad.selinux_audit_data = &sad;
 	ad.u.dentry = dentry;
 
 	rc = avc_has_perm(sid, isec->sid, isec->sclass,
@@ -2971,15 +2998,15 @@
 	/* fall through */
 	case FIGETBSZ:
 	/* fall through */
-	case EXT2_IOC_GETFLAGS:
+	case FS_IOC_GETFLAGS:
 	/* fall through */
-	case EXT2_IOC_GETVERSION:
+	case FS_IOC_GETVERSION:
 		error = file_has_perm(cred, file, FILE__GETATTR);
 		break;
 
-	case EXT2_IOC_SETFLAGS:
+	case FS_IOC_SETFLAGS:
 	/* fall through */
-	case EXT2_IOC_SETVERSION:
+	case FS_IOC_SETVERSION:
 		error = file_has_perm(cred, file, FILE__SETATTR);
 		break;
 
@@ -3346,10 +3373,12 @@
 {
 	u32 sid;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 
 	sid = task_sid(current);
 
 	COMMON_AUDIT_DATA_INIT(&ad, KMOD);
+	ad.selinux_audit_data = &sad;
 	ad.u.kmod_name = kmod_name;
 
 	return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
@@ -3488,8 +3517,8 @@
 	if (ihlen < sizeof(_iph))
 		goto out;
 
-	ad->u.net.v4info.saddr = ih->saddr;
-	ad->u.net.v4info.daddr = ih->daddr;
+	ad->u.net->v4info.saddr = ih->saddr;
+	ad->u.net->v4info.daddr = ih->daddr;
 	ret = 0;
 
 	if (proto)
@@ -3507,8 +3536,8 @@
 		if (th == NULL)
 			break;
 
-		ad->u.net.sport = th->source;
-		ad->u.net.dport = th->dest;
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
 		break;
 	}
 
@@ -3523,8 +3552,8 @@
 		if (uh == NULL)
 			break;
 
-		ad->u.net.sport = uh->source;
-		ad->u.net.dport = uh->dest;
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
 		break;
 	}
 
@@ -3539,8 +3568,8 @@
 		if (dh == NULL)
 			break;
 
-		ad->u.net.sport = dh->dccph_sport;
-		ad->u.net.dport = dh->dccph_dport;
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
 		break;
 	}
 
@@ -3567,8 +3596,8 @@
 	if (ip6 == NULL)
 		goto out;
 
-	ad->u.net.v6info.saddr = ip6->saddr;
-	ad->u.net.v6info.daddr = ip6->daddr;
+	ad->u.net->v6info.saddr = ip6->saddr;
+	ad->u.net->v6info.daddr = ip6->daddr;
 	ret = 0;
 
 	nexthdr = ip6->nexthdr;
@@ -3588,8 +3617,8 @@
 		if (th == NULL)
 			break;
 
-		ad->u.net.sport = th->source;
-		ad->u.net.dport = th->dest;
+		ad->u.net->sport = th->source;
+		ad->u.net->dport = th->dest;
 		break;
 	}
 
@@ -3600,8 +3629,8 @@
 		if (uh == NULL)
 			break;
 
-		ad->u.net.sport = uh->source;
-		ad->u.net.dport = uh->dest;
+		ad->u.net->sport = uh->source;
+		ad->u.net->dport = uh->dest;
 		break;
 	}
 
@@ -3612,8 +3641,8 @@
 		if (dh == NULL)
 			break;
 
-		ad->u.net.sport = dh->dccph_sport;
-		ad->u.net.dport = dh->dccph_dport;
+		ad->u.net->sport = dh->dccph_sport;
+		ad->u.net->dport = dh->dccph_dport;
 		break;
 	}
 
@@ -3633,13 +3662,13 @@
 	char *addrp;
 	int ret;
 
-	switch (ad->u.net.family) {
+	switch (ad->u.net->family) {
 	case PF_INET:
 		ret = selinux_parse_skb_ipv4(skb, ad, proto);
 		if (ret)
 			goto parse_error;
-		addrp = (char *)(src ? &ad->u.net.v4info.saddr :
-				       &ad->u.net.v4info.daddr);
+		addrp = (char *)(src ? &ad->u.net->v4info.saddr :
+				       &ad->u.net->v4info.daddr);
 		goto okay;
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -3647,8 +3676,8 @@
 		ret = selinux_parse_skb_ipv6(skb, ad, proto);
 		if (ret)
 			goto parse_error;
-		addrp = (char *)(src ? &ad->u.net.v6info.saddr :
-				       &ad->u.net.v6info.daddr);
+		addrp = (char *)(src ? &ad->u.net->v6info.saddr :
+				       &ad->u.net->v6info.daddr);
 		goto okay;
 #endif	/* IPV6 */
 	default:
@@ -3722,13 +3751,17 @@
 {
 	struct sk_security_struct *sksec = sk->sk_security;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 	u32 tsid = task_sid(task);
 
 	if (sksec->sid == SECINITSID_KERNEL)
 		return 0;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.sk = sk;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->sk = sk;
 
 	return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad);
 }
@@ -3806,6 +3839,8 @@
 		char *addrp;
 		struct sk_security_struct *sksec = sk->sk_security;
 		struct common_audit_data ad;
+		struct selinux_audit_data sad = {0,};
+		struct lsm_network_audit net = {0,};
 		struct sockaddr_in *addr4 = NULL;
 		struct sockaddr_in6 *addr6 = NULL;
 		unsigned short snum;
@@ -3832,8 +3867,10 @@
 				if (err)
 					goto out;
 				COMMON_AUDIT_DATA_INIT(&ad, NET);
-				ad.u.net.sport = htons(snum);
-				ad.u.net.family = family;
+				ad.selinux_audit_data = &sad;
+				ad.u.net = &net;
+				ad.u.net->sport = htons(snum);
+				ad.u.net->family = family;
 				err = avc_has_perm(sksec->sid, sid,
 						   sksec->sclass,
 						   SOCKET__NAME_BIND, &ad);
@@ -3865,13 +3902,15 @@
 			goto out;
 
 		COMMON_AUDIT_DATA_INIT(&ad, NET);
-		ad.u.net.sport = htons(snum);
-		ad.u.net.family = family;
+		ad.selinux_audit_data = &sad;
+		ad.u.net = &net;
+		ad.u.net->sport = htons(snum);
+		ad.u.net->family = family;
 
 		if (family == PF_INET)
-			ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
+			ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
 		else
-			ad.u.net.v6info.saddr = addr6->sin6_addr;
+			ad.u.net->v6info.saddr = addr6->sin6_addr;
 
 		err = avc_has_perm(sksec->sid, sid,
 				   sksec->sclass, node_perm, &ad);
@@ -3898,6 +3937,8 @@
 	if (sksec->sclass == SECCLASS_TCP_SOCKET ||
 	    sksec->sclass == SECCLASS_DCCP_SOCKET) {
 		struct common_audit_data ad;
+		struct selinux_audit_data sad = {0,};
+		struct lsm_network_audit net = {0,};
 		struct sockaddr_in *addr4 = NULL;
 		struct sockaddr_in6 *addr6 = NULL;
 		unsigned short snum;
@@ -3923,8 +3964,10 @@
 		       TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
 
 		COMMON_AUDIT_DATA_INIT(&ad, NET);
-		ad.u.net.dport = htons(snum);
-		ad.u.net.family = sk->sk_family;
+		ad.selinux_audit_data = &sad;
+		ad.u.net = &net;
+		ad.u.net->dport = htons(snum);
+		ad.u.net->family = sk->sk_family;
 		err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad);
 		if (err)
 			goto out;
@@ -4013,10 +4056,14 @@
 	struct sk_security_struct *sksec_other = other->sk_security;
 	struct sk_security_struct *sksec_new = newsk->sk_security;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 	int err;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.sk = other;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->sk = other;
 
 	err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
 			   sksec_other->sclass,
@@ -4043,9 +4090,13 @@
 	struct sk_security_struct *ssec = sock->sk->sk_security;
 	struct sk_security_struct *osec = other->sk->sk_security;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.sk = other->sk;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->sk = other->sk;
 
 	return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
 			    &ad);
@@ -4081,11 +4132,15 @@
 	struct sk_security_struct *sksec = sk->sk_security;
 	u32 sk_sid = sksec->sid;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 	char *addrp;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.netif = skb->skb_iif;
-	ad.u.net.family = family;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->netif = skb->skb_iif;
+	ad.u.net->family = family;
 	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
 	if (err)
 		return err;
@@ -4112,6 +4167,8 @@
 	u16 family = sk->sk_family;
 	u32 sk_sid = sksec->sid;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 	char *addrp;
 	u8 secmark_active;
 	u8 peerlbl_active;
@@ -4136,8 +4193,10 @@
 		return 0;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.netif = skb->skb_iif;
-	ad.u.net.family = family;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->netif = skb->skb_iif;
+	ad.u.net->family = family;
 	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
 	if (err)
 		return err;
@@ -4472,6 +4531,8 @@
 	char *addrp;
 	u32 peer_sid;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 	u8 secmark_active;
 	u8 netlbl_active;
 	u8 peerlbl_active;
@@ -4489,8 +4550,10 @@
 		return NF_DROP;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.netif = ifindex;
-	ad.u.net.family = family;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->netif = ifindex;
+	ad.u.net->family = family;
 	if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
 		return NF_DROP;
 
@@ -4577,6 +4640,8 @@
 	struct sock *sk = skb->sk;
 	struct sk_security_struct *sksec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 	char *addrp;
 	u8 proto;
 
@@ -4585,8 +4650,10 @@
 	sksec = sk->sk_security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.netif = ifindex;
-	ad.u.net.family = family;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->netif = ifindex;
+	ad.u.net->family = family;
 	if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
 		return NF_DROP;
 
@@ -4608,6 +4675,8 @@
 	u32 peer_sid;
 	struct sock *sk;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
+	struct lsm_network_audit net = {0,};
 	char *addrp;
 	u8 secmark_active;
 	u8 peerlbl_active;
@@ -4654,8 +4723,10 @@
 	}
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.netif = ifindex;
-	ad.u.net.family = family;
+	ad.selinux_audit_data = &sad;
+	ad.u.net = &net;
+	ad.u.net->netif = ifindex;
+	ad.u.net->family = family;
 	if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
 		return NF_DROP;
 
@@ -4770,11 +4841,13 @@
 {
 	struct ipc_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 
 	isec = ipc_perms->security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = ipc_perms->key;
 
 	return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
@@ -4795,6 +4868,7 @@
 {
 	struct ipc_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 	int rc;
 
@@ -4805,6 +4879,7 @@
 	isec = msq->q_perm.security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = msq->q_perm.key;
 
 	rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4825,11 +4900,13 @@
 {
 	struct ipc_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 
 	isec = msq->q_perm.security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = msq->q_perm.key;
 
 	return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4869,6 +4946,7 @@
 	struct ipc_security_struct *isec;
 	struct msg_security_struct *msec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 	int rc;
 
@@ -4890,6 +4968,7 @@
 	}
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = msq->q_perm.key;
 
 	/* Can this process write to the queue? */
@@ -4914,6 +4993,7 @@
 	struct ipc_security_struct *isec;
 	struct msg_security_struct *msec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = task_sid(target);
 	int rc;
 
@@ -4921,6 +5001,7 @@
 	msec = msg->security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = msq->q_perm.key;
 
 	rc = avc_has_perm(sid, isec->sid,
@@ -4936,6 +5017,7 @@
 {
 	struct ipc_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 	int rc;
 
@@ -4946,6 +5028,7 @@
 	isec = shp->shm_perm.security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = shp->shm_perm.key;
 
 	rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4966,11 +5049,13 @@
 {
 	struct ipc_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 
 	isec = shp->shm_perm.security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = shp->shm_perm.key;
 
 	return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -5028,6 +5113,7 @@
 {
 	struct ipc_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 	int rc;
 
@@ -5038,6 +5124,7 @@
 	isec = sma->sem_perm.security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = sma->sem_perm.key;
 
 	rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5058,11 +5145,13 @@
 {
 	struct ipc_security_struct *isec;
 	struct common_audit_data ad;
+	struct selinux_audit_data sad = {0,};
 	u32 sid = current_sid();
 
 	isec = sma->sem_perm.security;
 
 	COMMON_AUDIT_DATA_INIT(&ad, IPC);
+	ad.selinux_audit_data = &sad;
 	ad.u.ipc_id = sma->sem_perm.key;
 
 	return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 005a91b..1931370 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -47,6 +47,31 @@
 };
 
 /*
+ * We only need this data after we have decided to send an audit message.
+ */
+struct selinux_late_audit_data {
+	u32 ssid;
+	u32 tsid;
+	u16 tclass;
+	u32 requested;
+	u32 audited;
+	u32 denied;
+	int result;
+};
+
+/*
+ * We collect this at the beginning or during an selinux security operation
+ */
+struct selinux_audit_data {
+	/*
+	 * auditdeny is a bit tricky and unintuitive.  See the
+	 * comments in avc.c for its meaning and usage.
+	 */
+	u32 auditdeny;
+	struct selinux_late_audit_data *slad;
+};
+
+/*
  * AVC operations
  */
 
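
The two structures added above split the SELinux audit state: struct selinux_audit_data is set up at the start of a permission check, while struct selinux_late_audit_data is only populated once a denial (or audited grant) is actually being logged. A hedged sketch of the intended usage, following the slow_avc_audit() hunk earlier in this diff (example_audit() and its parameters are illustrative):

static void example_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
			  u32 audited, u32 denied)
{
	struct common_audit_data ad;
	struct selinux_audit_data sad = {0,};
	struct selinux_late_audit_data slad;

	COMMON_AUDIT_DATA_INIT(&ad, NONE);
	ad.selinux_audit_data = &sad;		/* collected up front */

	slad.ssid = ssid;			/* filled only once we decide to audit */
	slad.tsid = tsid;
	slad.tclass = tclass;
	slad.requested = requested;
	slad.audited = audited;
	slad.denied = denied;
	sad.slad = &slad;

	common_lsm_audit(&ad, avc_audit_pre_callback, avc_audit_post_callback);
}
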
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 48a7d00..d7018bf 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -344,7 +344,7 @@
 static int sel_make_policycap(void);
 
 /* declaration for sel_make_class_dirs */
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
 			unsigned long *ino);
 
 static ssize_t sel_read_mls(struct file *filp, char __user *buf,
@@ -1678,13 +1678,9 @@
 	inode->i_ino = sel_class_to_ino(index);
 	d_add(dentry, inode);
 
-	dentry = d_alloc_name(dir, "perms");
-	if (!dentry)
-		return -ENOMEM;
-
-	rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino);
-	if (rc)
-		return rc;
+	dentry = sel_make_dir(dir, "perms", &last_class_ino);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
 
 	rc = sel_make_perm_files(classname, index, dentry);
 
@@ -1733,15 +1729,12 @@
 	for (i = 0; i < nclasses; i++) {
 		struct dentry *class_name_dir;
 
-		rc = -ENOMEM;
-		class_name_dir = d_alloc_name(class_dir, classes[i]);
-		if (!class_name_dir)
-			goto out;
-
-		rc = sel_make_dir(class_dir->d_inode, class_name_dir,
+		class_name_dir = sel_make_dir(class_dir, classes[i],
 				&last_class_ino);
-		if (rc)
+		if (IS_ERR(class_name_dir)) {
+			rc = PTR_ERR(class_name_dir);
 			goto out;
+		}
 
 		/* i+1 since class values are 1-indexed */
 		rc = sel_make_class_dir_entries(classes[i], i + 1,
@@ -1787,14 +1780,20 @@
 	return 0;
 }
 
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
 			unsigned long *ino)
 {
+	struct dentry *dentry = d_alloc_name(dir, name);
 	struct inode *inode;
 
-	inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
-	if (!inode)
-		return -ENOMEM;
+	if (!dentry)
+		return ERR_PTR(-ENOMEM);
+
+	inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
+	if (!inode) {
+		dput(dentry);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
@@ -1803,16 +1802,16 @@
 	inc_nlink(inode);
 	d_add(dentry, inode);
 	/* bump link count on parent directory, too */
-	inc_nlink(dir);
+	inc_nlink(dir->d_inode);
 
-	return 0;
+	return dentry;
 }
 
 static int sel_fill_super(struct super_block *sb, void *data, int silent)
 {
 	int ret;
 	struct dentry *dentry;
-	struct inode *inode, *root_inode;
+	struct inode *inode;
 	struct inode_security_struct *isec;
 
 	static struct tree_descr selinux_files[] = {
@@ -1839,18 +1838,12 @@
 	if (ret)
 		goto err;
 
-	root_inode = sb->s_root->d_inode;
-
-	ret = -ENOMEM;
-	dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME);
-	if (!dentry)
+	bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
+	if (IS_ERR(bool_dir)) {
+		ret = PTR_ERR(bool_dir);
+		bool_dir = NULL;
 		goto err;
-
-	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-	if (ret)
-		goto err;
-
-	bool_dir = dentry;
+	}
 
 	ret = -ENOMEM;
 	dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
@@ -1872,54 +1865,39 @@
 	d_add(dentry, inode);
 	selinux_null = dentry;
 
-	ret = -ENOMEM;
-	dentry = d_alloc_name(sb->s_root, "avc");
-	if (!dentry)
+	dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
+	if (IS_ERR(dentry)) {
+		ret = PTR_ERR(dentry);
 		goto err;
-
-	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-	if (ret)
-		goto err;
+	}
 
 	ret = sel_make_avc_files(dentry);
 	if (ret)
 		goto err;
 
-	ret = -ENOMEM;
-	dentry = d_alloc_name(sb->s_root, "initial_contexts");
-	if (!dentry)
+	dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
+	if (IS_ERR(dentry)) {
+		ret = PTR_ERR(dentry);
 		goto err;
-
-	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-	if (ret)
-		goto err;
+	}
 
 	ret = sel_make_initcon_files(dentry);
 	if (ret)
 		goto err;
 
-	ret = -ENOMEM;
-	dentry = d_alloc_name(sb->s_root, "class");
-	if (!dentry)
+	class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
+	if (IS_ERR(class_dir)) {
+		ret = PTR_ERR(class_dir);
+		class_dir = NULL;
 		goto err;
+	}
 
-	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-	if (ret)
+	policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
+	if (IS_ERR(policycap_dir)) {
+		ret = PTR_ERR(policycap_dir);
+		policycap_dir = NULL;
 		goto err;
-
-	class_dir = dentry;
-
-	ret = -ENOMEM;
-	dentry = d_alloc_name(sb->s_root, "policy_capabilities");
-	if (!dentry)
-		goto err;
-
-	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-	if (ret)
-		goto err;
-
-	policycap_dir = dentry;
-
+	}
 	return 0;
 err:
 	printk(KERN_ERR "SELinux: %s:  failed while creating inodes\n",
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 2ad0065..4ede719 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -185,6 +185,15 @@
  */
 #define SMK_NUM_ACCESS_TYPE 5
 
+/* SMACK data */
+struct smack_audit_data {
+	const char *function;
+	char *subject;
+	char *object;
+	char *request;
+	int result;
+};
+
 /*
  * Smack audit data; is empty if CONFIG_AUDIT not set
  * to save some stack
@@ -192,6 +201,7 @@
 struct smk_audit_info {
 #ifdef CONFIG_AUDIT
 	struct common_audit_data a;
+	struct smack_audit_data sad;
 #endif
 };
 /*
@@ -311,7 +321,16 @@
 {
 	memset(a, 0, sizeof(*a));
 	a->a.type = type;
-	a->a.smack_audit_data.function = func;
+	a->a.smack_audit_data = &a->sad;
+	a->a.smack_audit_data->function = func;
+}
+
+static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func,
+				   char type, struct lsm_network_audit *net)
+{
+	smk_ad_init(a, func, type);
+	memset(net, 0, sizeof(*net));
+	a->a.u.net = net;
 }
 
 static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
@@ -337,7 +356,7 @@
 static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
 					    struct sock *sk)
 {
-	a->a.u.net.sk = sk;
+	a->a.u.net->sk = sk;
 }
 
 #else /* no AUDIT */
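
smk_ad_init_net() introduced above wires a caller-provided struct lsm_network_audit into the common audit data, since u.net is now a pointer rather than an embedded struct. A short usage sketch based on the smack_lsm.c hunks further down (example_unix_check() and its arguments are illustrative):

static int example_unix_check(struct socket_smack *ssp,
			      struct socket_smack *osp, struct sock *other)
{
	struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
	struct lsm_network_audit net;

	/* net must stay in scope for the duration of the check */
	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
	smk_ad_setfield_u_net_sk(&ad, other);
#endif
	return smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
}
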
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index cc7cb6e..c8115f7 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -275,9 +275,9 @@
 static void smack_log_callback(struct audit_buffer *ab, void *a)
 {
 	struct common_audit_data *ad = a;
-	struct smack_audit_data *sad = &ad->smack_audit_data;
+	struct smack_audit_data *sad = ad->smack_audit_data;
 	audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
-			 ad->smack_audit_data.function,
+			 ad->smack_audit_data->function,
 			 sad->result ? "denied" : "granted");
 	audit_log_format(ab, " subject=");
 	audit_log_untrustedstring(ab, sad->subject);
@@ -310,19 +310,19 @@
 	if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
 		return;
 
-	if (a->smack_audit_data.function == NULL)
-		a->smack_audit_data.function = "unknown";
+	sad = a->smack_audit_data;
+
+	if (sad->function == NULL)
+		sad->function = "unknown";
 
 	/* end preparing the audit data */
-	sad = &a->smack_audit_data;
 	smack_str_from_perm(request_buffer, request);
 	sad->subject = subject_label;
 	sad->object  = object_label;
 	sad->request = request_buffer;
 	sad->result  = result;
-	a->lsm_pre_audit = smack_log_callback;
 
-	common_lsm_audit(a);
+	common_lsm_audit(a, smack_log_callback, NULL);
 }
 #else /* #ifdef CONFIG_AUDIT */
 void smack_log(char *subject_label, char *object_label, int request,
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index cd667b4..10056f2 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1943,13 +1943,15 @@
 	rcu_read_lock();
 	hostsp = smack_host_label(sap);
 	if (hostsp != NULL) {
-		sk_lbl = SMACK_UNLABELED_SOCKET;
 #ifdef CONFIG_AUDIT
-		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
-		ad.a.u.net.family = sap->sin_family;
-		ad.a.u.net.dport = sap->sin_port;
-		ad.a.u.net.v4info.daddr = sap->sin_addr.s_addr;
+		struct lsm_network_audit net;
+
+		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+		ad.a.u.net->family = sap->sin_family;
+		ad.a.u.net->dport = sap->sin_port;
+		ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
 #endif
+		sk_lbl = SMACK_UNLABELED_SOCKET;
 		rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
 	} else {
 		sk_lbl = SMACK_CIPSO_SOCKET;
@@ -2810,8 +2812,12 @@
 	struct smk_audit_info ad;
 	int rc = 0;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
 	smk_ad_setfield_u_net_sk(&ad, other);
+#endif
 
 	if (!capable(CAP_MAC_OVERRIDE))
 		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
@@ -2842,8 +2848,12 @@
 	struct smk_audit_info ad;
 	int rc = 0;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
 	smk_ad_setfield_u_net_sk(&ad, other->sk);
+#endif
 
 	if (!capable(CAP_MAC_OVERRIDE))
 		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
@@ -2990,6 +3000,9 @@
 	char *csp;
 	int rc;
 	struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+#endif
 	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
 		return 0;
 
@@ -3007,9 +3020,9 @@
 	netlbl_secattr_destroy(&secattr);
 
 #ifdef CONFIG_AUDIT
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
-	ad.a.u.net.family = sk->sk_family;
-	ad.a.u.net.netif = skb->skb_iif;
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+	ad.a.u.net->family = sk->sk_family;
+	ad.a.u.net->netif = skb->skb_iif;
 	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
 #endif
 	/*
@@ -3152,6 +3165,9 @@
 	char *sp;
 	int rc;
 	struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+#endif
 
 	/* handle mapped IPv4 packets arriving via IPv6 sockets */
 	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
@@ -3166,9 +3182,9 @@
 	netlbl_secattr_destroy(&secattr);
 
 #ifdef CONFIG_AUDIT
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
-	ad.a.u.net.family = family;
-	ad.a.u.net.netif = skb->skb_iif;
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+	ad.a.u.net->family = family;
+	ad.a.u.net->netif = skb->skb_iif;
 	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
 #endif
 	/*
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index bbe32d2..dbc5507 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -46,7 +46,7 @@
 
   The number of ports to be created can be specified via the module
   parameter "ports".  For example, to create four ports, add the
-  following option in /etc/modprobe.conf:
+  following option in a configuration file under /etc/modprobe.d/:
 
 	option snd-seq-dummy ports=4
 
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index c896116..fe5ae09 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -50,7 +50,8 @@
 	  before the other sound driver of yours, making the
 	  pc-speaker a default sound device. Which is likely not
 	  what you want. To make this driver play nicely with other
-	  sound driver, you can add this into your /etc/modprobe.conf:
+	  sound driver, you can add this in a configuration file under
+	  /etc/modprobe.d/ directory:
 	  options snd-pcsp index=2
 
 	  You don't need this driver if you only want your pc-speaker to beep.
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index babaedd..d7ccf28 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -65,7 +65,7 @@
 static char *id = SNDRV_DEFAULT_STR1;		/* ID for this card */
 //static bool enable = SNDRV_DEFAULT_ENABLE1;	/* Enable this card */
 #ifdef CONFIG_PNP
-static int isapnp = 1;			/* Enable ISA PnP detection */
+static bool isapnp = true;			/* Enable ISA PnP detection */
 #endif
 static long port = SNDRV_DEFAULT_PORT1; 	/* 0x530,0xe80,0xf40,0x604 */
 static long mpu_port = SNDRV_DEFAULT_PORT1;	/* 0x300,0x310,0x320,0x330 */
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index b4a6aa9..8490f59 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -1019,13 +1019,15 @@
 	irq_cfg = get_irq_config(sscape->type, irq[dev]);
 	if (irq_cfg == INVALID_IRQ) {
 		snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", irq[dev]);
-		return -ENXIO;
+		err = -ENXIO;
+		goto _release_dma;
 	}
 
 	mpu_irq_cfg = get_irq_config(sscape->type, mpu_irq[dev]);
 	if (mpu_irq_cfg == INVALID_IRQ) {
 		snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", mpu_irq[dev]);
-		return -ENXIO;
+		err = -ENXIO;
+		goto _release_dma;
 	}
 
 	/*
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
index eba7345..536c4c0 100644
--- a/sound/oss/msnd_pinnacle.c
+++ b/sound/oss/msnd_pinnacle.c
@@ -1294,6 +1294,8 @@
 
 static int upload_dsp_code(void)
 {
+	int ret = 0;
+
 	msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
 #ifndef HAVE_DSPCODEH
 	INITCODESIZE = mod_firmware_load(INITCODEFILE, &INITCODE);
@@ -1312,7 +1314,8 @@
 	memcpy_toio(dev.base, PERMCODE, PERMCODESIZE);
 	if (msnd_upload_host(&dev, INITCODE, INITCODESIZE) < 0) {
 		printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out;
 	}
 #ifdef HAVE_DSPCODEH
 	printk(KERN_INFO LOGNAME ": DSP firmware uploaded (resident)\n");
@@ -1320,12 +1323,13 @@
 	printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n");
 #endif
 
+out:
 #ifndef HAVE_DSPCODEH
 	vfree(INITCODE);
 	vfree(PERMCODE);
 #endif
 
-	return 0;
+	return ret;
 }
 
 #ifdef MSND_CLASSIC
@@ -1631,7 +1635,7 @@
 static int joystick_io __initdata = 0;
 
 /* If we have the digital daugherboard... */
-static int digital __initdata = 0;
+static bool digital __initdata = false;
 #endif
 
 static int fifosize __initdata =	DEFFIFOSIZE;
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 8816804..5ca0939 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -2,8 +2,8 @@
 
 config SND_TEA575X
 	tristate
-	depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2
-	default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2
+	depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2 || RADIO_MAXIRADIO
+	default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2 || RADIO_MAXIRADIO
 
 menuconfig SND_PCI
 	bool "PCI sound devices"
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 4cc315d..bc86cb7 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -1,7 +1,7 @@
 /******************************************************************************
 
     AudioScience HPI driver
-    Copyright (C) 1997-2011  AudioScience Inc. <support@audioscience.com>
+    Copyright (C) 1997-2012  AudioScience Inc. <support@audioscience.com>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of version 2 of the GNU General Public License as
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index 2d7d1c2..5ef4fe9 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -1,7 +1,7 @@
 /******************************************************************************
 
     AudioScience HPI driver
-    Copyright (C) 1997-2011  AudioScience Inc. <support@audioscience.com>
+    Copyright (C) 1997-2012  AudioScience Inc. <support@audioscience.com>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of version 2 of the GNU General Public License as
@@ -39,9 +39,9 @@
 
 }
 
-/** Allocated an area of locked memory for bus master DMA operations.
+/** Allocate an area of locked memory for bus master DMA operations.
 
-On error, return -ENOMEM, and *pMemArea.size = 0
+If allocation fails, return 1, and *pMemArea.size = 0
 */
 u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
 	struct pci_dev *pdev)
@@ -62,7 +62,7 @@
 		HPI_DEBUG_LOG(WARNING,
 			"failed to allocate %d bytes locked memory\n", size);
 		p_mem_area->size = 0;
-		return -ENOMEM;
+		return 1;
 	}
 }
 
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 9a9f372..56b4f74 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -851,6 +851,9 @@
 	unsigned int pin_amp_workaround:1; /* pin out-amp takes index
 					    * (e.g. Conexant codecs)
 					    */
+	unsigned int single_adc_amp:1; /* adc in-amp takes no index
+					* (e.g. CX20549 codec)
+					*/
 	unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
 	unsigned int pins_shutup:1;	/* pins are shut up */
 	unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index b58b4b1..4c054f4 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -418,7 +418,7 @@
 	else
 		buf2[0] = '\0';
 
-	printk(KERN_INFO "HDMI: supports coding type %s:"
+	_snd_printd(SND_PR_VERBOSE, "HDMI: supports coding type %s:"
 			" channels = %d, rates =%s%s\n",
 			cea_audio_coding_type_names[a->format],
 			a->channels,
@@ -442,14 +442,14 @@
 {
 	int i;
 
-	printk(KERN_INFO "HDMI: detected monitor %s at connection type %s\n",
+	_snd_printd(SND_PR_VERBOSE, "HDMI: detected monitor %s at connection type %s\n",
 			e->monitor_name,
 			eld_connection_type_names[e->conn_type]);
 
 	if (e->spk_alloc) {
 		char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
 		snd_print_channel_allocation(e->spk_alloc, buf, sizeof(buf));
-		printk(KERN_INFO "HDMI: available speakers:%s\n", buf);
+		_snd_printd(SND_PR_VERBOSE, "HDMI: available speakers:%s\n", buf);
 	}
 
 	for (i = 0; i < e->sad_count; i++)
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 254ab52..e59e2f0 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -651,9 +651,16 @@
 			snd_iprintf(buffer, "  Amp-In caps: ");
 			print_amp_caps(buffer, codec, nid, HDA_INPUT);
 			snd_iprintf(buffer, "  Amp-In vals: ");
-			print_amp_vals(buffer, codec, nid, HDA_INPUT,
-				       wid_caps & AC_WCAP_STEREO,
-				       wid_type == AC_WID_PIN ? 1 : conn_len);
+			if (wid_type == AC_WID_PIN ||
+			    (codec->single_adc_amp &&
+			     wid_type == AC_WID_AUD_IN))
+				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+					       wid_caps & AC_WCAP_STEREO,
+					       1);
+			else
+				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+					       wid_caps & AC_WCAP_STEREO,
+					       conn_len);
 		}
 		if (wid_caps & AC_WCAP_OUT_AMP) {
 			snd_iprintf(buffer, "  Amp-Out caps: ");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8c6523b..a36488d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -141,7 +141,6 @@
 	unsigned int hp_laptop:1;
 	unsigned int asus:1;
 	unsigned int pin_eapd_ctrls:1;
-	unsigned int single_adc_amp:1;
 
 	unsigned int adc_switching:1;
 
@@ -687,27 +686,26 @@
 static const struct hda_input_mux cxt5045_capture_source = {
 	.num_items = 2,
 	.items = {
-		{ "IntMic", 0x1 },
-		{ "ExtMic", 0x2 },
+		{ "Internal Mic", 0x1 },
+		{ "Mic",          0x2 },
 	}
 };
 
 static const struct hda_input_mux cxt5045_capture_source_benq = {
-	.num_items = 5,
+	.num_items = 4,
 	.items = {
-		{ "IntMic", 0x1 },
-		{ "ExtMic", 0x2 },
-		{ "LineIn", 0x3 },
-		{ "CD",     0x4 },
-		{ "Mixer",  0x0 },
+		{ "Internal Mic", 0x1 },
+		{ "Mic",          0x2 },
+		{ "Line",         0x3 },
+		{ "Mixer",        0x0 },
 	}
 };
 
 static const struct hda_input_mux cxt5045_capture_source_hp530 = {
 	.num_items = 2,
 	.items = {
-		{ "ExtMic", 0x1 },
-		{ "IntMic", 0x2 },
+		{ "Mic",          0x1 },
+		{ "Internal Mic", 0x2 },
 	}
 };
 
@@ -798,10 +796,8 @@
 }
 
 static const struct snd_kcontrol_new cxt5045_mixers[] = {
-	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
@@ -822,27 +818,15 @@
 };
 
 static const struct snd_kcontrol_new cxt5045_benq_mixers[] = {
-	HDA_CODEC_VOLUME("CD Capture Volume", 0x1a, 0x04, HDA_INPUT),
-	HDA_CODEC_MUTE("CD Capture Switch", 0x1a, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("CD Playback Volume", 0x17, 0x4, HDA_INPUT),
-	HDA_CODEC_MUTE("CD Playback Switch", 0x17, 0x4, HDA_INPUT),
-
-	HDA_CODEC_VOLUME("Line In Capture Volume", 0x1a, 0x03, HDA_INPUT),
-	HDA_CODEC_MUTE("Line In Capture Switch", 0x1a, 0x03, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Playback Volume", 0x17, 0x3, HDA_INPUT),
-	HDA_CODEC_MUTE("Line In Playback Switch", 0x17, 0x3, HDA_INPUT),
-
-	HDA_CODEC_VOLUME("Mixer Capture Volume", 0x1a, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer Capture Switch", 0x1a, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x3, HDA_INPUT),
+	HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x3, HDA_INPUT),
 
 	{}
 };
 
 static const struct snd_kcontrol_new cxt5045_mixers_hp530[] = {
-	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
@@ -946,10 +930,10 @@
 	/* Output controls */
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x10, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Speaker Playback Switch", 0x10, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Node 11 Playback Volume", 0x11, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Node 11 Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Node 12 Playback Volume", 0x12, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Node 12 Playback Switch", 0x12, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("HP-OUT Playback Volume", 0x11, 0x0, HDA_OUTPUT),
+	HDA_CODEC_MUTE("HP-OUT Playback Switch", 0x11, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("LINE1 Playback Volume", 0x12, 0x0, HDA_OUTPUT),
+	HDA_CODEC_MUTE("LINE1 Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	
 	/* Modes for retasking pin widgets */
 	CXT_PIN_MODE("HP-OUT pin mode", 0x11, CXT_PIN_DIR_INOUT),
@@ -960,16 +944,16 @@
 
 	/* Loopback mixer controls */
 
-	HDA_CODEC_VOLUME("Mixer-1 Volume", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-1 Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-2 Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-2 Switch", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-3 Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-3 Switch", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-4 Volume", 0x17, 0x3, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-4 Switch", 0x17, 0x3, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-5 Volume", 0x17, 0x4, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-5 Switch", 0x17, 0x4, HDA_INPUT),
+	HDA_CODEC_VOLUME("PCM Volume", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("PCM Switch", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("MIC1 pin Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("MIC1 pin Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("LINE1 pin Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("LINE1 pin Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("HP-OUT pin Volume", 0x17, 0x3, HDA_INPUT),
+	HDA_CODEC_MUTE("HP-OUT pin Switch", 0x17, 0x3, HDA_INPUT),
+	HDA_CODEC_VOLUME("CD pin Volume", 0x17, 0x4, HDA_INPUT),
+	HDA_CODEC_MUTE("CD pin Switch", 0x17, 0x4, HDA_INPUT),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = "Input Source",
@@ -978,16 +962,8 @@
 		.put = conexant_mux_enum_put,
 	},
 	/* Audio input controls */
-	HDA_CODEC_VOLUME("Input-1 Volume", 0x1a, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-1 Switch", 0x1a, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-2 Volume", 0x1a, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-2 Switch", 0x1a, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-3 Volume", 0x1a, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-3 Switch", 0x1a, 0x2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-4 Volume", 0x1a, 0x3, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-4 Switch", 0x1a, 0x3, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-5 Volume", 0x1a, 0x4, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-5 Switch", 0x1a, 0x4, HDA_INPUT),
+	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -1009,10 +985,6 @@
 	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 	{0x18, AC_VERB_SET_DIGI_CONVERT_1, 0},
 
-	/* Start with output sum widgets muted and their output gains at min */
-	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-
 	/* Unmute retasking pin widget output buffers since the default
 	 * state appears to be output.  As the pin mode is changed by the
 	 * user the pin mode control will take care of enabling the pin's
@@ -1027,11 +999,11 @@
 	/* Set ADC connection select to match default mixer setting (mic1
 	 * pin)
 	 */
-	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
-	{0x17, AC_VERB_SET_CONNECT_SEL, 0x00},
+	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x01},
+	{0x17, AC_VERB_SET_CONNECT_SEL, 0x01},
 
 	/* Mute all inputs to mixer widget (even unconnected ones) */
-	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* Mixer pin */
+	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* Mixer */
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, /* Mic1 pin */
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, /* Line pin */
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, /* HP pin */
@@ -1110,7 +1082,7 @@
 	if (!spec)
 		return -ENOMEM;
 	codec->spec = spec;
-	codec->pin_amp_workaround = 1;
+	codec->single_adc_amp = 1;
 
 	spec->multiout.max_channels = 2;
 	spec->multiout.num_dacs = ARRAY_SIZE(cxt5045_dac_nids);
@@ -4220,7 +4192,7 @@
 		int idx = get_input_connection(codec, adc_nid, nid);
 		if (idx < 0)
 			continue;
-		if (spec->single_adc_amp)
+		if (codec->single_adc_amp)
 			idx = 0;
 		return cx_auto_add_volume_idx(codec, label, pfx,
 					      cidx, adc_nid, HDA_INPUT, idx);
@@ -4275,7 +4247,7 @@
 		if (cidx < 0)
 			continue;
 		input_conn[i] = spec->imux_info[i].adc;
-		if (!spec->single_adc_amp)
+		if (!codec->single_adc_amp)
 			input_conn[i] |= cidx << 8;
 		if (i > 0 && input_conn[i] != input_conn[0])
 			multi_connection = 1;
@@ -4466,15 +4438,17 @@
 	if (!spec)
 		return -ENOMEM;
 	codec->spec = spec;
-	codec->pin_amp_workaround = 1;
 
 	switch (codec->vendor_id) {
 	case 0x14f15045:
-		spec->single_adc_amp = 1;
+		codec->single_adc_amp = 1;
 		break;
 	case 0x14f15051:
 		add_cx5051_fake_mutes(codec);
+		codec->pin_amp_workaround = 1;
 		break;
+	default:
+		codec->pin_amp_workaround = 1;
 	}
 
 	apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 540cd13..83f345f 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -757,8 +757,6 @@
 	struct hdmi_spec *spec = codec->spec;
 	int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
 	int pin_nid;
-	int pd = !!(res & AC_UNSOL_RES_PD);
-	int eldv = !!(res & AC_UNSOL_RES_ELDV);
 	int pin_idx;
 	struct hda_jack_tbl *jack;
 
@@ -768,9 +766,10 @@
 	pin_nid = jack->nid;
 	jack->jack_dirty = 1;
 
-	printk(KERN_INFO
+	_snd_printd(SND_PR_VERBOSE,
 		"HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
-		codec->addr, pin_nid, pd, eldv);
+		codec->addr, pin_nid,
+		!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
 
 	pin_idx = pin_nid_to_pin_index(spec, pin_nid);
 	if (pin_idx < 0)
@@ -992,7 +991,7 @@
 	if (eld->monitor_present)
 		eld_valid	= !!(present & AC_PINSENSE_ELDV);
 
-	printk(KERN_INFO
+	_snd_printd(SND_PR_VERBOSE,
 		"HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
 		codec->addr, pin_nid, eld->monitor_present, eld_valid);
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8ea2fd6..9917e55 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2717,9 +2717,6 @@
 	int max_nums = ARRAY_SIZE(spec->private_adc_nids);
 	int i, nums = 0;
 
-	if (spec->shared_mic_hp)
-		max_nums = 1; /* no multi streams with the shared HP/mic */
-
 	nid = codec->start_nid;
 	for (i = 0; i < codec->num_nodes; i++, nid++) {
 		hda_nid_t src;
@@ -4076,6 +4073,7 @@
 	if (spec->dyn_adc_switch)
 		return;
 
+ again:
 	nums = 0;
 	for (n = 0; n < spec->num_adc_nids; n++) {
 		hda_nid_t cap = spec->private_capsrc_nids[n];
@@ -4096,6 +4094,11 @@
 	if (!nums) {
 		/* check whether ADC-switch is possible */
 		if (!alc_check_dyn_adc_switch(codec)) {
+			if (spec->shared_mic_hp) {
+				spec->shared_mic_hp = 0;
+				spec->private_imux[0].num_items = 1;
+				goto again;
+			}
 			printk(KERN_WARNING "hda_codec: %s: no valid ADC found;"
 			       " using fallback 0x%x\n",
 			       codec->chip_name, spec->private_adc_nids[0]);
@@ -4113,7 +4116,7 @@
 
 	if (spec->auto_mic)
 		alc_auto_mic_check_imux(codec); /* check auto-mic setups */
-	else if (spec->input_mux->num_items == 1)
+	else if (spec->input_mux->num_items == 1 || spec->shared_mic_hp)
 		spec->num_adc_nids = 1; /* reduce to a single ADC */
 }
 
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index f8e10ce..b3e24f2 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -140,7 +140,7 @@
  * min : 0xFE : -115.0 dB
  * mute: 0xFF
  */
-static const DECLARE_TLV_DB_SCALE(out_tlv, -11500, 50, 1);
+static const DECLARE_TLV_DB_SCALE(out_tlv, -11550, 50, 1);
 
 static const struct snd_kcontrol_new ak4642_snd_controls[] = {
 
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d192626..8e92fb8 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -143,11 +143,11 @@
 }
 
 /*
- * using codec assist to small pop, hp_powerup or lineout_powerup
- * should stay setting until vag_powerup is fully ramped down,
- * vag fully ramped down require 400ms.
+ * As the manual describes, the ADC/DAC only works when VAG is powered up,
+ * so enable VAG before bringing the ADC/DAC up.
+ * On power down we need to wait 400ms for VAG to fully ramp down.
  */
-static int small_pop_event(struct snd_soc_dapm_widget *w,
+static int power_vag_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
 	switch (event) {
@@ -156,7 +156,7 @@
 			SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
 		break;
 
-	case SND_SOC_DAPM_PRE_PMD:
+	case SND_SOC_DAPM_POST_PMD:
 		snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
 			SGTL5000_VAG_POWERUP, 0);
 		msleep(400);
@@ -201,12 +201,8 @@
 				mic_bias_event,
 				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
-	SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
-			small_pop_event,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
-	SND_SOC_DAPM_PGA_E("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0,
-			small_pop_event,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
 
 	SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
 	SND_SOC_DAPM_MUX("Headphone Mux", SND_SOC_NOPM, 0, 0, &dac_mux),
@@ -221,8 +217,11 @@
 				0, SGTL5000_CHIP_DIG_POWER,
 				1, 0),
 
-	SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
+	SND_SOC_DAPM_SUPPLY("VAG_POWER", SGTL5000_CHIP_ANA_POWER, 7, 0,
+			    power_vag_event,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
 	SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
 };
 
@@ -231,9 +230,11 @@
 	{"Capture Mux", "LINE_IN", "LINE_IN"},	/* line_in --> adc_mux */
 	{"Capture Mux", "MIC_IN", "MIC_IN"},	/* mic_in --> adc_mux */
 
+	{"ADC", NULL, "VAG_POWER"},
 	{"ADC", NULL, "Capture Mux"},		/* adc_mux --> adc */
 	{"AIFOUT", NULL, "ADC"},		/* adc --> i2s_out */
 
+	{"DAC", NULL, "VAG_POWER"},
 	{"DAC", NULL, "AIFIN"},			/* i2s-->dac,skip audio mux */
 	{"Headphone Mux", "DAC", "DAC"},	/* dac --> hp_mux */
 	{"LO", NULL, "DAC"},			/* dac --> line_out */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index fe7fbae..7c49642 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3629,7 +3629,7 @@
 		case 2:
 		case 3:
 			wm8994->hubs.dcs_codes_l = -9;
-			wm8994->hubs.dcs_codes_r = -5;
+			wm8994->hubs.dcs_codes_r = -7;
 			break;
 		default:
 			break;
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index afbabf4..3fea5a1 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -58,9 +58,9 @@
 {
 	struct mpc8610_hpcd_data *machine_data =
 		container_of(card, struct mpc8610_hpcd_data, card);
-	struct ccsr_guts_86xx __iomem *guts;
+	struct ccsr_guts __iomem *guts;
 
-	guts = ioremap(guts_phys, sizeof(struct ccsr_guts_86xx));
+	guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
 	if (!guts) {
 		dev_err(card->dev, "could not map global utilities\n");
 		return -ENOMEM;
@@ -142,9 +142,9 @@
 {
 	struct mpc8610_hpcd_data *machine_data =
 		container_of(card, struct mpc8610_hpcd_data, card);
-	struct ccsr_guts_86xx __iomem *guts;
+	struct ccsr_guts __iomem *guts;
 
-	guts = ioremap(guts_phys, sizeof(struct ccsr_guts_86xx));
+	guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
 	if (!guts) {
 		dev_err(card->dev, "could not map global utilities\n");
 		return -ENOMEM;
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index 4662340..982a1c9 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -46,7 +46,7 @@
  * ch: The channel on the DMA controller (0, 1, 2, or 3)
  * device: The device to set as the target (CCSR_GUTS_DMUXCR_xxx)
  */
-static inline void guts_set_dmuxcr(struct ccsr_guts_85xx __iomem *guts,
+static inline void guts_set_dmuxcr(struct ccsr_guts __iomem *guts,
 	unsigned int co, unsigned int ch, unsigned int device)
 {
 	unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
@@ -90,9 +90,9 @@
 {
 	struct machine_data *mdata =
 		container_of(card, struct machine_data, card);
-	struct ccsr_guts_85xx __iomem *guts;
+	struct ccsr_guts __iomem *guts;
 
-	guts = ioremap(guts_phys, sizeof(struct ccsr_guts_85xx));
+	guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
 	if (!guts) {
 		dev_err(card->dev, "could not map global utilities\n");
 		return -ENOMEM;
@@ -164,9 +164,9 @@
 {
 	struct machine_data *mdata =
 		container_of(card, struct machine_data, card);
-	struct ccsr_guts_85xx __iomem *guts;
+	struct ccsr_guts __iomem *guts;
 
-	guts = ioremap(guts_phys, sizeof(struct ccsr_guts_85xx));
+	guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
 	if (!guts) {
 		dev_err(card->dev, "could not map global utilities\n");
 		return -ENOMEM;
diff --git a/sound/soc/imx/imx-audmux.c b/sound/soc/imx/imx-audmux.c
index 601df80..f237003 100644
--- a/sound/soc/imx/imx-audmux.c
+++ b/sound/soc/imx/imx-audmux.c
@@ -40,12 +40,6 @@
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *audmux_debugfs_root;
 
-static int audmux_open_file(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 /* There is an annoying discontinuity in the SSI numbering with regard
  * to the Linux number of the devices */
 static const char *audmux_port_string(int port)
@@ -79,6 +73,9 @@
 	if (!buf)
 		return -ENOMEM;
 
+	if (!audmux_base)
+		return -ENOSYS;
+
 	if (audmux_clk)
 		clk_prepare_enable(audmux_clk);
 
@@ -142,7 +139,7 @@
 }
 
 static const struct file_operations audmux_debugfs_fops = {
-	.open = audmux_open_file,
+	.open = simple_open,
 	.read = audmux_read_file,
 	.llseek = default_llseek,
 };
@@ -158,7 +155,7 @@
 		return;
 	}
 
-	for (i = 1; i < 8; i++) {
+	for (i = 0; i < MX31_AUDMUX_PORT6_SSI_PINS_6 + 1; i++) {
 		snprintf(buf, sizeof(buf), "ssi%d", i);
 		if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
 					 (void *)i, &audmux_debugfs_fops))
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index 6ca1f46..e373fbb 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -28,6 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/dmaengine.h>
+#include <linux/fsl/mxs-dma.h>
 
 #include <sound/core.h>
 #include <sound/initval.h>
@@ -36,7 +37,6 @@
 #include <sound/soc.h>
 #include <sound/dmaengine_pcm.h>
 
-#include <mach/dma.h>
 #include "mxs-pcm.h"
 
 struct mxs_pcm_dma_data {
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 12be05b..53f4fd8 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -24,12 +24,12 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/time.h>
+#include <linux/fsl/mxs-dma.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 #include <sound/saif.h>
-#include <mach/dma.h>
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
 #include <mach/mxs.h>
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 609abd5..d085837 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -17,6 +17,7 @@
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/initval.h>
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a4deebc..accdcb7 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -201,12 +201,6 @@
 static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set);
 
 #ifdef CONFIG_DEBUG_FS
-static int codec_reg_open_file(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf,
 				   size_t count, loff_t *ppos)
 {
@@ -264,7 +258,7 @@
 }
 
 static const struct file_operations codec_reg_fops = {
-	.open = codec_reg_open_file,
+	.open = simple_open,
 	.read = codec_reg_read_file,
 	.write = codec_reg_write_file,
 	.llseek = default_llseek,
@@ -1087,6 +1081,8 @@
 		snd_soc_dapm_new_controls(&platform->dapm,
 			driver->dapm_widgets, driver->num_dapm_widgets);
 
+	platform->dapm.idle_bias_off = 1;
+
 	if (driver->probe) {
 		ret = driver->probe(platform);
 		if (ret < 0) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 6241490..5cbd2d76 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1544,12 +1544,6 @@
 }
 
 #ifdef CONFIG_DEBUG_FS
-static int dapm_widget_power_open_file(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t dapm_widget_power_read_file(struct file *file,
 					   char __user *user_buf,
 					   size_t count, loff_t *ppos)
@@ -1613,17 +1607,11 @@
 }
 
 static const struct file_operations dapm_widget_power_fops = {
-	.open = dapm_widget_power_open_file,
+	.open = simple_open,
 	.read = dapm_widget_power_read_file,
 	.llseek = default_llseek,
 };
 
-static int dapm_bias_open_file(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t dapm_bias_read_file(struct file *file, char __user *user_buf,
 				   size_t count, loff_t *ppos)
 {
@@ -1654,7 +1642,7 @@
 }
 
 static const struct file_operations dapm_bias_fops = {
-	.open = dapm_bias_open_file,
+	.open = simple_open,
 	.read = dapm_bias_read_file,
 	.llseek = default_llseek,
 };
diff --git a/sound/soc/soc-dmaengine-pcm.c b/sound/soc/soc-dmaengine-pcm.c
index 4420b70..4756952 100644
--- a/sound/soc/soc-dmaengine-pcm.c
+++ b/sound/soc/soc-dmaengine-pcm.c
@@ -143,7 +143,7 @@
 	direction = snd_pcm_substream_to_dma_direction(substream);
 
 	prtd->pos = 0;
-	desc = chan->device->device_prep_dma_cyclic(chan,
+	desc = dmaengine_prep_dma_cyclic(chan,
 		substream->runtime->dma_addr,
 		snd_pcm_lib_buffer_bytes(substream),
 		snd_pcm_lib_period_bytes(substream), direction);
diff --git a/sound/soc/tegra/tegra_i2s.c b/sound/soc/tegra/tegra_i2s.c
index 33509de..e533499 100644
--- a/sound/soc/tegra/tegra_i2s.c
+++ b/sound/soc/tegra/tegra_i2s.c
@@ -79,11 +79,15 @@
 	struct tegra_i2s *i2s = s->private;
 	int i;
 
+	clk_enable(i2s->clk_i2s);
+
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 		u32 val = tegra_i2s_read(i2s, regs[i].offset);
 		seq_printf(s, "%s = %08x\n", regs[i].name, val);
 	}
 
+	clk_disable(i2s->clk_i2s);
+
 	return 0;
 }
 
@@ -112,7 +116,7 @@
 		debugfs_remove(i2s->debug);
 }
 #else
-static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s, int id)
+static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s)
 {
 }
 
diff --git a/sound/soc/tegra/tegra_spdif.c b/sound/soc/tegra/tegra_spdif.c
index 475428c..9ff2c60 100644
--- a/sound/soc/tegra/tegra_spdif.c
+++ b/sound/soc/tegra/tegra_spdif.c
@@ -79,11 +79,15 @@
 	struct tegra_spdif *spdif = s->private;
 	int i;
 
+	clk_enable(spdif->clk_spdif_out);
+
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 		u32 val = tegra_spdif_read(spdif, regs[i].offset);
 		seq_printf(s, "%s = %08x\n", regs[i].name, val);
 	}
 
+	clk_disable(spdif->clk_spdif_out);
+
 	return 0;
 }
 
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 87feeee..2d89f02 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -48,6 +48,9 @@
 	Only consider these symbols. CSV that understands
 	file://filename entries.
 
+--symbol-filter=::
+	Only show symbols that (partially) match this filter.
+
 -U::
 --hide-unresolved::
         Only display entries resolved to a symbol.
@@ -110,6 +113,8 @@
 	requires a tty, if one is not present, as when piping to other
 	commands, the stdio interface is used.
 
+--gtk:: Use the GTK2 interface.
+
 -k::
 --vmlinux=<file>::
         vmlinux pathname
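
For illustration only (the filter string below is made up): with the
options documented above, "perf report --gtk" renders the report in the
GTK2 browser instead of the TUI, and "perf report --symbol-filter=sched"
limits the output to symbols whose names contain "sched".
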
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 74fd7f8..820371f 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -182,7 +182,7 @@
 
 ### --- END CONFIGURATION SECTION ---
 
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 BASIC_LDFLAGS =
 
 # Guard against environment variables
@@ -234,6 +234,25 @@
 
 export PERL_PATH
 
+FLEX = $(CROSS_COMPILE)flex
+BISON= $(CROSS_COMPILE)bison
+
+event-parser:
+	$(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
+	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
+
+$(OUTPUT)util/parse-events-flex.c: event-parser
+$(OUTPUT)util/parse-events-bison.c: event-parser
+
+pmu-parser:
+	$(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
+
+$(OUTPUT)util/pmu-flex.c: pmu-parser
+$(OUTPUT)util/pmu-bison.c: pmu-parser
+
+$(OUTPUT)util/parse-events.o: event-parser pmu-parser
+
 LIB_FILE=$(OUTPUT)libperf.a
 
 LIB_H += ../../include/linux/perf_event.h
@@ -249,7 +268,7 @@
 LIB_H += util/include/linux/ctype.h
 LIB_H += util/include/linux/kernel.h
 LIB_H += util/include/linux/list.h
-LIB_H += util/include/linux/module.h
+LIB_H += util/include/linux/export.h
 LIB_H += util/include/linux/poison.h
 LIB_H += util/include/linux/prefetch.h
 LIB_H += util/include/linux/rbtree.h
@@ -276,6 +295,7 @@
 LIB_H += util/debug.h
 LIB_H += util/debugfs.h
 LIB_H += util/sysfs.h
+LIB_H += util/pmu.h
 LIB_H += util/event.h
 LIB_H += util/evsel.h
 LIB_H += util/evlist.h
@@ -323,6 +343,7 @@
 LIB_OBJS += $(OUTPUT)util/ctype.o
 LIB_OBJS += $(OUTPUT)util/debugfs.o
 LIB_OBJS += $(OUTPUT)util/sysfs.o
+LIB_OBJS += $(OUTPUT)util/pmu.o
 LIB_OBJS += $(OUTPUT)util/environment.o
 LIB_OBJS += $(OUTPUT)util/event.o
 LIB_OBJS += $(OUTPUT)util/evlist.o
@@ -359,6 +380,10 @@
 LIB_OBJS += $(OUTPUT)util/thread.o
 LIB_OBJS += $(OUTPUT)util/thread_map.o
 LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
+LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
+LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
+LIB_OBJS += $(OUTPUT)util/pmu-flex.o
+LIB_OBJS += $(OUTPUT)util/pmu-bison.o
 LIB_OBJS += $(OUTPUT)util/trace-event-read.o
 LIB_OBJS += $(OUTPUT)util/trace-event-info.o
 LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
@@ -501,6 +526,20 @@
 	endif
 endif
 
+ifdef NO_GTK2
+	BASIC_CFLAGS += -DNO_GTK2
+else
+	FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
+	ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
+		msg := $(warning GTK2 not found, disabling GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
+		BASIC_CFLAGS += -DNO_GTK2_SUPPORT
+	else
+		BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
+		EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
+		LIB_OBJS += $(OUTPUT)util/gtk/browser.o
+	endif
+endif
+
 ifdef NO_LIBPERL
 	BASIC_CFLAGS += -DNO_LIBPERL
 else
@@ -647,6 +686,8 @@
 	QUIET_LINK     = @echo '   ' LINK $@;
 	QUIET_MKDIR    = @echo '   ' MKDIR $@;
 	QUIET_GEN      = @echo '   ' GEN $@;
+	QUIET_FLEX     = @echo '   ' FLEX $@;
+	QUIET_BISON    = @echo '   ' BISON $@;
 endif
 endif
 
@@ -727,12 +768,28 @@
 	$(SCRIPTS) \
 	: $(OUTPUT)PERF-VERSION-FILE
 
+.SUFFIXES:
+.SUFFIXES: .o .c .S .s
+
+# These two need to be here so that when O= is not used they take precedence
+# over the general rule for .o
+
+$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+
+$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+
 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
 	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
+	$(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
 $(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
+	$(QUIET_CC)$(CC) -o $@ -S $(ALL_CFLAGS) $<
 $(OUTPUT)%.o: %.S
 	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.s: %.S
+	$(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
 
 $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
 	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
@@ -795,6 +852,8 @@
 	@echo '  html		- make html documentation'
 	@echo '  info		- make GNU info documentation (access with info <foo>)'
 	@echo '  pdf		- make pdf documentation'
+	@echo '  event-parser	- make event parser code'
+	@echo '  pmu-parser	- make pmu format parser code'
 	@echo '  TAGS		- use etags to make tag information for source browsing'
 	@echo '  tags		- use ctags to make tag information for source browsing'
 	@echo '  cscope	- use cscope to make interactive browsing database'
@@ -931,6 +990,7 @@
 	$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
 	$(MAKE) -C Documentation/ clean
 	$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
+	$(RM) $(OUTPUT)util/*-{bison,flex}*
 	$(python-clean)
 
 .PHONY: all install clean strip
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 4f19513..d29d350 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -24,6 +24,11 @@
 static bool  force;
 static bool show_displacement;
 
+struct perf_diff {
+	struct perf_tool tool;
+	struct perf_session *session;
+};
+
 static int hists__add_entry(struct hists *self,
 			    struct addr_location *al, u64 period)
 {
@@ -32,12 +37,14 @@
 	return -ENOMEM;
 }
 
-static int diff__process_sample_event(struct perf_tool *tool __used,
+static int diff__process_sample_event(struct perf_tool *tool,
 				      union perf_event *event,
 				      struct perf_sample *sample,
 				      struct perf_evsel *evsel __used,
 				      struct machine *machine)
 {
+	struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
+	struct perf_session *session = _diff->session;
 	struct addr_location al;
 
 	if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
@@ -49,24 +56,26 @@
 	if (al.filtered || al.sym == NULL)
 		return 0;
 
-	if (hists__add_entry(&evsel->hists, &al, sample->period)) {
+	if (hists__add_entry(&session->hists, &al, sample->period)) {
 		pr_warning("problem incrementing symbol period, skipping event\n");
 		return -1;
 	}
 
-	evsel->hists.stats.total_period += sample->period;
+	session->hists.stats.total_period += sample->period;
 	return 0;
 }
 
-static struct perf_tool perf_diff = {
-	.sample	= diff__process_sample_event,
-	.mmap	= perf_event__process_mmap,
-	.comm	= perf_event__process_comm,
-	.exit	= perf_event__process_task,
-	.fork	= perf_event__process_task,
-	.lost	= perf_event__process_lost,
-	.ordered_samples = true,
-	.ordering_requires_timestamps = true,
+static struct perf_diff diff = {
+	.tool = {
+		.sample	= diff__process_sample_event,
+		.mmap	= perf_event__process_mmap,
+		.comm	= perf_event__process_comm,
+		.exit	= perf_event__process_task,
+		.fork	= perf_event__process_task,
+		.lost	= perf_event__process_lost,
+		.ordered_samples = true,
+		.ordering_requires_timestamps = true,
+	},
 };
 
 static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -107,12 +116,6 @@
 	self->entries = tmp;
 }
 
-static void hists__set_positions(struct hists *self)
-{
-	hists__output_resort(self);
-	hists__resort_entries(self);
-}
-
 static struct hist_entry *hists__find_entry(struct hists *self,
 					    struct hist_entry *he)
 {
@@ -146,30 +149,37 @@
 static int __cmd_diff(void)
 {
 	int ret, i;
+#define older (session[0])
+#define newer (session[1])
 	struct perf_session *session[2];
 
-	session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff);
-	session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff);
+	older = perf_session__new(input_old, O_RDONLY, force, false,
+				  &diff.tool);
+	newer = perf_session__new(input_new, O_RDONLY, force, false,
+				  &diff.tool);
 	if (session[0] == NULL || session[1] == NULL)
 		return -ENOMEM;
 
 	for (i = 0; i < 2; ++i) {
-		ret = perf_session__process_events(session[i], &perf_diff);
+		diff.session = session[i];
+		ret = perf_session__process_events(session[i], &diff.tool);
 		if (ret)
 			goto out_delete;
+		hists__output_resort(&session[i]->hists);
 	}
 
-	hists__output_resort(&session[1]->hists);
 	if (show_displacement)
-		hists__set_positions(&session[0]->hists);
+		hists__resort_entries(&older->hists);
 
-	hists__match(&session[0]->hists, &session[1]->hists);
-	hists__fprintf(&session[1]->hists, &session[0]->hists,
+	hists__match(&older->hists, &newer->hists);
+	hists__fprintf(&newer->hists, &older->hists,
 		       show_displacement, true, 0, 0, stdout);
 out_delete:
 	for (i = 0; i < 2; ++i)
 		perf_session__delete(session[i]);
 	return ret;
+#undef older
+#undef newer
 }
 
 static const char * const diff_usage[] = {
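
The rework above hangs per-run state off the callbacks by embedding the
perf_tool in a larger struct and recovering it with container_of() in the
sample handler.  A minimal, self-contained userspace sketch of that
pattern follows; the struct and field names are illustrative, not the
actual perf definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool { int (*sample)(struct tool *tool); };

struct diff_ctx {
	struct tool tool;	/* embedded member handed to callbacks */
	const char *session;	/* extra state the callback needs */
};

static int sample_cb(struct tool *tool)
{
	/* recover the enclosing object from the embedded member */
	struct diff_ctx *ctx = container_of(tool, struct diff_ctx, tool);

	printf("sample for session %s\n", ctx->session);
	return 0;
}

int main(void)
{
	struct diff_ctx ctx = { .tool = { .sample = sample_cb },
				.session = "perf.data.old" };

	return ctx.tool.sample(&ctx.tool);
}
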
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e91c6e..2e31743 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -40,7 +40,7 @@
 	struct perf_tool	tool;
 	struct perf_session	*session;
 	char const		*input_name;
-	bool			force, use_tui, use_stdio;
+	bool			force, use_tui, use_gtk, use_stdio;
 	bool			hide_unresolved;
 	bool			dont_use_callchains;
 	bool			show_full_info;
@@ -50,6 +50,7 @@
 	const char		*pretty_printing_style;
 	symbol_filter_t		annotate_init;
 	const char		*cpu_list;
+	const char		*symbol_filter_str;
 	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 };
 
@@ -400,6 +401,9 @@
 	list_for_each_entry(pos, &session->evlist->entries, node) {
 		struct hists *hists = &pos->hists;
 
+		if (pos->idx == 0)
+			hists->symbol_filter_str = rep->symbol_filter_str;
+
 		hists__collapse_resort(hists);
 		hists__output_resort(hists);
 		nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
@@ -411,8 +415,13 @@
 	}
 
 	if (use_browser > 0) {
-		perf_evlist__tui_browse_hists(session->evlist, help,
-					      NULL, NULL, 0);
+		if (use_browser == 1) {
+			perf_evlist__tui_browse_hists(session->evlist, help,
+						      NULL, NULL, 0);
+		} else if (use_browser == 2) {
+			perf_evlist__gtk_browse_hists(session->evlist, help,
+						      NULL, NULL, 0);
+		}
 	} else
 		perf_evlist__tty_browse_hists(session->evlist, rep, help);
 
@@ -569,6 +578,7 @@
 	OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
 		   "pretty printing style key: normal raw"),
 	OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
+	OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
 	OPT_BOOLEAN(0, "stdio", &report.use_stdio,
 		    "Use the stdio interface"),
 	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
@@ -591,6 +601,8 @@
 		   "only consider symbols in these comms"),
 	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
 		   "only consider these symbols"),
+	OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
+		   "only show symbols that (partially) match this filter"),
 	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
 		   "width[,width...]",
 		   "don't try to adjust column width, use these fixed values"),
@@ -624,6 +636,8 @@
 		use_browser = 0;
 	else if (report.use_tui)
 		use_browser = 1;
+	else if (report.use_gtk)
+		use_browser = 2;
 
 	if (report.inverted_callchain)
 		callchain_param.order = ORDER_CALLER;
@@ -660,7 +674,10 @@
 	}
 
 	if (strcmp(report.input_name, "-") != 0) {
-		setup_browser(true);
+		if (report.use_gtk)
+			perf_gtk_setup_browser(argc, argv, true);
+		else
+			setup_browser(true);
 	} else {
 		use_browser = 0;
 	}
@@ -709,11 +726,16 @@
 	} else
 		symbol_conf.exclude_other = false;
 
-	/*
-	 * Any (unrecognized) arguments left?
-	 */
-	if (argc)
-		usage_with_options(report_usage, options);
+	if (argc) {
+		/*
+		 * Special case: if there's an argument left then assume that
+		 * it's a symbol filter:
+		 */
+		if (argc > 1)
+			usage_with_options(report_usage, options);
+
+		report.symbol_filter_str = argv[0];
+	}
 
 	sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
 
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ea40e4e..c941bb6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -296,7 +296,7 @@
 	if (system_wide)
 		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
 						group, group_fd);
-	if (!target_pid && !target_tid) {
+	if (!target_pid && !target_tid && (!group || evsel == first)) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 3e087ce..1c5b980 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -13,6 +13,7 @@
 #include "util/parse-events.h"
 #include "util/symbol.h"
 #include "util/thread_map.h"
+#include "util/pmu.h"
 #include "../../include/linux/hw_breakpoint.h"
 
 #include <sys/mman.h>
@@ -650,7 +651,7 @@
 
 	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
 	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
-	TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+	TEST_ASSERT_VAL("wrong config", 0x1a == evsel->attr.config);
 	return 0;
 }
 
@@ -677,6 +678,24 @@
 	return 0;
 }
 
+static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+	TEST_ASSERT_VAL("wrong config",
+			PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+	TEST_ASSERT_VAL("wrong period",
+			100000 == evsel->attr.sample_period);
+	TEST_ASSERT_VAL("wrong config1",
+			0 == evsel->attr.config1);
+	TEST_ASSERT_VAL("wrong config2",
+			1 == evsel->attr.config2);
+	return 0;
+}
+
 static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -858,6 +877,115 @@
 	return test__checkevent_genhw(evlist);
 }
 
+static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+	return test__checkevent_breakpoint(evlist);
+}
+
+static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+	return test__checkevent_breakpoint_x(evlist);
+}
+
+static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+	return test__checkevent_breakpoint_r(evlist);
+}
+
+static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+	return test__checkevent_breakpoint_w(evlist);
+}
+
+static int test__checkevent_pmu(struct perf_evlist *evlist)
+{
+
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+	TEST_ASSERT_VAL("wrong config",    10 == evsel->attr.config);
+	TEST_ASSERT_VAL("wrong config1",    1 == evsel->attr.config1);
+	TEST_ASSERT_VAL("wrong config2",    3 == evsel->attr.config2);
+	TEST_ASSERT_VAL("wrong period",  1000 == evsel->attr.sample_period);
+
+	return 0;
+}
+
+static int test__checkevent_list(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
+
+	/* r1 */
+	evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+	TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+	TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1);
+	TEST_ASSERT_VAL("wrong config2", 0 == evsel->attr.config2);
+	TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+	/* syscalls:sys_enter_open:k */
+	evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
+	TEST_ASSERT_VAL("wrong sample_type",
+		(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
+		evsel->attr.sample_type);
+	TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+	/* 1:1:hp */
+	evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+	TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
+	TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+	return 0;
+}
+
 static struct test__event_st {
 	const char *name;
 	__u32 type;
@@ -872,7 +1000,7 @@
 		.check = test__checkevent_tracepoint_multi,
 	},
 	{
-		.name  = "r1",
+		.name  = "r1a",
 		.check = test__checkevent_raw,
 	},
 	{
@@ -884,6 +1012,10 @@
 		.check = test__checkevent_symbolic_name,
 	},
 	{
+		.name  = "cycles/period=100000,config2/",
+		.check = test__checkevent_symbolic_name_config,
+	},
+	{
 		.name  = "faults",
 		.check = test__checkevent_symbolic_alias,
 	},
@@ -916,7 +1048,7 @@
 		.check = test__checkevent_tracepoint_multi_modifier,
 	},
 	{
-		.name  = "r1:kp",
+		.name  = "r1a:kp",
 		.check = test__checkevent_raw_modifier,
 	},
 	{
@@ -935,6 +1067,30 @@
 		.name  = "L1-dcache-load-miss:kp",
 		.check = test__checkevent_genhw_modifier,
 	},
+	{
+		.name  = "mem:0:u",
+		.check = test__checkevent_breakpoint_modifier,
+	},
+	{
+		.name  = "mem:0:x:k",
+		.check = test__checkevent_breakpoint_x_modifier,
+	},
+	{
+		.name  = "mem:0:r:hp",
+		.check = test__checkevent_breakpoint_r_modifier,
+	},
+	{
+		.name  = "mem:0:w:up",
+		.check = test__checkevent_breakpoint_w_modifier,
+	},
+	{
+		.name  = "cpu/config=10,config1,config2=3,period=1000/u",
+		.check = test__checkevent_pmu,
+	},
+	{
+		.name  = "r1,syscalls:sys_enter_open:k,1:1:hp",
+		.check = test__checkevent_list,
+	},
 };
 
 #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@ -960,10 +1116,9 @@
 		}
 
 		ret = e->check(evlist);
+		perf_evlist__delete(evlist);
 		if (ret)
 			break;
-
-		perf_evlist__delete(evlist);
 	}
 
 	return ret;
@@ -1462,6 +1617,11 @@
 
 #endif
 
+static int test__perf_pmu(void)
+{
+	return perf_pmu__test();
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -1497,6 +1657,10 @@
 		.func = test__PERF_RECORD,
 	},
 	{
+		.desc = "Test perf pmu format parsing",
+		.func = test__perf_pmu,
+	},
+	{
 		.func = NULL,
 	},
 };
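
The new test entries above exercise the term syntax accepted by the
generated event parser: "name=value" terms separated by commas, where a
bare term such as "config1" defaults to 1.  The following is a
standalone sketch of that syntax only, not the actual flex/bison parser
built from util/parse-events.l and util/parse-events.y.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char spec[] = "config=10,config1,config2=3,period=1000";
	char *term;

	for (term = strtok(spec, ","); term; term = strtok(NULL, ",")) {
		char *eq = strchr(term, '=');
		unsigned long long val = 1;	/* bare term defaults to 1 */

		if (eq) {
			*eq = '\0';
			val = strtoull(eq + 1, NULL, 0);
		}
		printf("%s = %llu\n", term, val);
	}
	return 0;
}
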
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 6170fd2..d9084e0 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -65,6 +65,21 @@
 endef
 endif
 
+ifndef NO_GTK2
+define SOURCE_GTK2
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error \"-Wstrict-prototypes\"
+
+int main(int argc, char *argv[])
+{
+        gtk_init(&argc, &argv);
+
+        return 0;
+}
+endef
+endif
+
 ifndef NO_LIBPERL
 define SOURCE_PERL_EMBED
 #include <EXTERN.h>
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e5a462f..199f69e 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -28,8 +28,8 @@
 int symbol__alloc_hist(struct symbol *sym)
 {
 	struct annotation *notes = symbol__annotation(sym);
-	size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
-				  (sym->end - sym->start) * sizeof(u64));
+	const size_t size = sym->end - sym->start + 1;
+	size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
 
 	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
 	if (notes->src == NULL)
@@ -64,7 +64,7 @@
 
 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
 
-	if (addr >= sym->end)
+	if (addr > sym->end)
 		return 0;
 
 	offset = addr - sym->start;
@@ -408,7 +408,7 @@
 	if (!notes->src->lines)
 		return -1;
 
-	start = map->unmap_ip(map, sym->start);
+	start = map__rip_2objdump(map, sym->start);
 
 	for (i = 0; i < len; i++) {
 		char *path = NULL;
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index fc5e5a0..8dd224d 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -45,6 +45,18 @@
 void exit_browser(bool wait_for_ok);
 #endif
 
+#ifdef NO_GTK2_SUPPORT
+static inline void perf_gtk_setup_browser(int argc __used, const char *argv[] __used, bool fallback_to_pager)
+{
+	if (fallback_to_pager)
+		setup_pager();
+}
+static inline void perf_gtk_exit_browser(bool wait_for_ok __used) {}
+#else
+void perf_gtk_setup_browser(int argc, const char *argv[], bool fallback_to_pager);
+void perf_gtk_exit_browser(bool wait_for_ok);
+#endif
+
 char *alias_lookup(const char *alias);
 int split_cmdline(char *cmdline, const char ***argv);
 
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 159263d..1986d80 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -51,13 +51,15 @@
 void perf_evlist__config_attrs(struct perf_evlist *evlist,
 			       struct perf_record_opts *opts)
 {
-	struct perf_evsel *evsel;
+	struct perf_evsel *evsel, *first;
 
 	if (evlist->cpus->map[0] < 0)
 		opts->no_inherit = true;
 
+	first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
 	list_for_each_entry(evsel, &evlist->entries, node) {
-		perf_evsel__config(evsel, opts);
+		perf_evsel__config(evsel, opts, first);
 
 		if (evlist->nr_entries > 1)
 			evsel->attr.sample_type |= PERF_SAMPLE_ID;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f421f7c..8c13dbc 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -34,7 +34,7 @@
 	return size;
 }
 
-static void hists__init(struct hists *hists)
+void hists__init(struct hists *hists)
 {
 	memset(hists, 0, sizeof(*hists));
 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
@@ -63,7 +63,8 @@
 	return evsel;
 }
 
-void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
+			struct perf_evsel *first)
 {
 	struct perf_event_attr *attr = &evsel->attr;
 	int track = !evsel->idx; /* only the first counter needs these */
@@ -134,7 +135,8 @@
 	attr->mmap = track;
 	attr->comm = track;
 
-	if (!opts->target_pid && !opts->target_tid && !opts->system_wide) {
+	if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
+	    (!opts->group || evsel == first)) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
@@ -578,6 +580,8 @@
 			return -EFAULT;
 
 		data->raw_data = (void *) pdata;
+
+		array = (void *)array + data->raw_size + sizeof(u32);
 	}
 
 	if (type & PERF_SAMPLE_BRANCH_STACK) {
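
The added line above advances the parse cursor past the raw-data region
(its u32 size word plus the payload) so that a following field such as
the branch stack is read from the correct offset.  A self-contained
userspace sketch of that cursor arithmetic, using made-up data rather
than a real perf sample:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[64];
	uint32_t raw_size = 8;		/* made-up raw payload size */
	uint64_t next_field = 3;	/* field that follows the raw data */
	const unsigned char *p = buf;
	uint32_t sz;
	uint64_t nr;

	/* layout: [u32 size][size bytes of raw data][u64 next field] */
	memcpy(buf, &raw_size, sizeof(raw_size));
	memset(buf + sizeof(raw_size), 0xab, raw_size);
	memcpy(buf + sizeof(raw_size) + raw_size, &next_field, sizeof(next_field));

	memcpy(&sz, p, sizeof(sz));
	p += sizeof(sz) + sz;	/* the cursor advance the patch adds */
	memcpy(&nr, p, sizeof(nr));

	printf("raw_size=%u next_field=%llu\n",
	       (unsigned)sz, (unsigned long long)nr);
	return 0;
}
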
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 326b8e4..3d6b3e4 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -80,7 +80,8 @@
 void perf_evsel__delete(struct perf_evsel *evsel);
 
 void perf_evsel__config(struct perf_evsel *evsel,
-			struct perf_record_opts *opts);
+			struct perf_record_opts *opts,
+			struct perf_evsel *first);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
@@ -169,4 +170,6 @@
 	return __perf_evsel__sample_size(evsel->attr.sample_type);
 }
 
+void hists__init(struct hists *hists);
+
 #endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/gtk/browser.c b/tools/perf/util/gtk/browser.c
new file mode 100644
index 0000000..258352a
--- /dev/null
+++ b/tools/perf/util/gtk/browser.c
@@ -0,0 +1,189 @@
+#include "../evlist.h"
+#include "../cache.h"
+#include "../evsel.h"
+#include "../sort.h"
+#include "../hist.h"
+#include "gtk.h"
+
+#include <signal.h>
+
+#define MAX_COLUMNS			32
+
+void perf_gtk_setup_browser(int argc, const char *argv[],
+			    bool fallback_to_pager __used)
+{
+	gtk_init(&argc, (char ***)&argv);
+}
+
+void perf_gtk_exit_browser(bool wait_for_ok __used)
+{
+	gtk_main_quit();
+}
+
+static void perf_gtk_signal(int sig)
+{
+	psignal(sig, "perf");
+	gtk_main_quit();
+}
+
+static void perf_gtk_resize_window(GtkWidget *window)
+{
+	GdkRectangle rect;
+	GdkScreen *screen;
+	int monitor;
+	int height;
+	int width;
+
+	screen = gtk_widget_get_screen(window);
+
+	monitor = gdk_screen_get_monitor_at_window(screen, window->window);
+
+	gdk_screen_get_monitor_geometry(screen, monitor, &rect);
+
+	width	= rect.width * 3 / 4;
+	height	= rect.height * 3 / 4;
+
+	gtk_window_resize(GTK_WINDOW(window), width, height);
+}
+
+static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists)
+{
+	GType col_types[MAX_COLUMNS];
+	GtkCellRenderer *renderer;
+	struct sort_entry *se;
+	GtkListStore *store;
+	struct rb_node *nd;
+	u64 total_period;
+	GtkWidget *view;
+	int col_idx;
+	int nr_cols;
+
+	nr_cols = 0;
+
+	/* The percentage column */
+	col_types[nr_cols++] = G_TYPE_STRING;
+
+	list_for_each_entry(se, &hist_entry__sort_list, list) {
+		if (se->elide)
+			continue;
+
+		col_types[nr_cols++] = G_TYPE_STRING;
+	}
+
+	store = gtk_list_store_newv(nr_cols, col_types);
+
+	view = gtk_tree_view_new();
+
+	renderer = gtk_cell_renderer_text_new();
+
+	col_idx = 0;
+
+	/* The percentage column */
+	gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+						    -1, "Overhead (%)",
+						    renderer, "text",
+						    col_idx++, NULL);
+
+	list_for_each_entry(se, &hist_entry__sort_list, list) {
+		if (se->elide)
+			continue;
+
+		gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+							    -1, se->se_header,
+							    renderer, "text",
+							    col_idx++, NULL);
+	}
+
+	gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
+
+	g_object_unref(GTK_TREE_MODEL(store));
+
+	total_period = hists->stats.total_period;
+
+	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+		GtkTreeIter iter;
+		double percent;
+		char s[512];
+
+		if (h->filtered)
+			continue;
+
+		gtk_list_store_append(store, &iter);
+
+		col_idx = 0;
+
+		percent = (h->period * 100.0) / total_period;
+
+		snprintf(s, ARRAY_SIZE(s), "%.2f", percent);
+
+		gtk_list_store_set(store, &iter, col_idx++, s, -1);
+
+		list_for_each_entry(se, &hist_entry__sort_list, list) {
+			if (se->elide)
+				continue;
+
+			se->se_snprintf(h, s, ARRAY_SIZE(s),
+					hists__col_len(hists, se->se_width_idx));
+
+			gtk_list_store_set(store, &iter, col_idx++, s, -1);
+		}
+	}
+
+	gtk_container_add(GTK_CONTAINER(window), view);
+}
+
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
+				  const char *help __used,
+				  void (*timer) (void *arg)__used,
+				  void *arg __used, int delay_secs __used)
+{
+	struct perf_evsel *pos;
+	GtkWidget *notebook;
+	GtkWidget *window;
+
+	signal(SIGSEGV, perf_gtk_signal);
+	signal(SIGFPE,  perf_gtk_signal);
+	signal(SIGINT,  perf_gtk_signal);
+	signal(SIGQUIT, perf_gtk_signal);
+	signal(SIGTERM, perf_gtk_signal);
+
+	window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
+
+	gtk_window_set_title(GTK_WINDOW(window), "perf report");
+
+	g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
+
+	notebook = gtk_notebook_new();
+
+	list_for_each_entry(pos, &evlist->entries, node) {
+		struct hists *hists = &pos->hists;
+		const char *evname = event_name(pos);
+		GtkWidget *scrolled_window;
+		GtkWidget *tab_label;
+
+		scrolled_window = gtk_scrolled_window_new(NULL, NULL);
+
+		gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
+							GTK_POLICY_AUTOMATIC,
+							GTK_POLICY_AUTOMATIC);
+
+		perf_gtk_show_hists(scrolled_window, hists);
+
+		tab_label = gtk_label_new(evname);
+
+		gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
+	}
+
+	gtk_container_add(GTK_CONTAINER(window), notebook);
+
+	gtk_widget_show_all(window);
+
+	perf_gtk_resize_window(window);
+
+	gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+
+	gtk_main();
+
+	return 0;
+}
diff --git a/tools/perf/util/gtk/gtk.h b/tools/perf/util/gtk/gtk.h
new file mode 100644
index 0000000..75177ee
--- /dev/null
+++ b/tools/perf/util/gtk/gtk.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_GTK_H_
+#define _PERF_GTK_H_ 1
+
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error "-Wstrict-prototypes"
+
+#endif /* _PERF_GTK_H_ */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index fcd9cf3..4c7c2d7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1177,7 +1177,7 @@
 		goto error;
 
 	msz = sizeof(attr);
-	if (sz < (ssize_t)msz)
+	if (sz < msz)
 		msz = sz;
 
 	for (i = 0 ; i < nre; i++) {
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3dc99a9..2ec4b60 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -10,11 +10,14 @@
 				       struct hist_entry *he);
 static bool hists__filter_entry_by_thread(struct hists *hists,
 					  struct hist_entry *he);
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+					  struct hist_entry *he);
 
 enum hist_filter {
 	HIST_FILTER__DSO,
 	HIST_FILTER__THREAD,
 	HIST_FILTER__PARENT,
+	HIST_FILTER__SYMBOL,
 };
 
 struct callchain_param	callchain_param = {
@@ -420,6 +423,7 @@
 {
 	hists__filter_entry_by_dso(hists, he);
 	hists__filter_entry_by_thread(hists, he);
+	hists__filter_entry_by_symbol(hists, he);
 }
 
 static void __hists__collapse_resort(struct hists *hists, bool threaded)
@@ -603,7 +607,7 @@
 	rem_hits.ms.sym = rem_sq_bracket;
 }
 
-static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
 					 u64 total_samples, int depth,
 					 int depth_mask, int left_margin)
 {
@@ -611,21 +615,16 @@
 	struct callchain_node *child;
 	struct callchain_list *chain;
 	int new_depth_mask = depth_mask;
-	u64 new_total;
 	u64 remaining;
 	size_t ret = 0;
 	int i;
 	uint entries_printed = 0;
 
-	if (callchain_param.mode == CHAIN_GRAPH_REL)
-		new_total = self->children_hit;
-	else
-		new_total = total_samples;
+	remaining = total_samples;
 
-	remaining = new_total;
-
-	node = rb_first(&self->rb_root);
+	node = rb_first(root);
 	while (node) {
+		u64 new_total;
 		u64 cumul;
 
 		child = rb_entry(node, struct callchain_node, rb_node);
@@ -653,11 +652,17 @@
 		list_for_each_entry(chain, &child->val, list) {
 			ret += ipchain__fprintf_graph(fp, chain, depth,
 						      new_depth_mask, i++,
-						      new_total,
+						      total_samples,
 						      cumul,
 						      left_margin);
 		}
-		ret += __callchain__fprintf_graph(fp, child, new_total,
+
+		if (callchain_param.mode == CHAIN_GRAPH_REL)
+			new_total = child->children_hit;
+		else
+			new_total = total_samples;
+
+		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
 						  depth + 1,
 						  new_depth_mask | (1 << depth),
 						  left_margin);
@@ -667,61 +672,75 @@
 	}
 
 	if (callchain_param.mode == CHAIN_GRAPH_REL &&
-		remaining && remaining != new_total) {
+		remaining && remaining != total_samples) {
 
 		if (!rem_sq_bracket)
 			return ret;
 
 		new_depth_mask &= ~(1 << (depth - 1));
-
 		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
-					      new_depth_mask, 0, new_total,
+					      new_depth_mask, 0, total_samples,
 					      remaining, left_margin);
 	}
 
 	return ret;
 }
 
-static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
 				       u64 total_samples, int left_margin)
 {
+	struct callchain_node *cnode;
 	struct callchain_list *chain;
-	bool printed = false;
-	int i = 0;
-	int ret = 0;
 	u32 entries_printed = 0;
+	bool printed = false;
+	struct rb_node *node;
+	int i = 0;
+	int ret = 0;
 
-	list_for_each_entry(chain, &self->val, list) {
-		if (!i++ && sort__first_dimension == SORT_SYM)
-			continue;
+	/*
+	 * If there is a single callchain root, don't bother printing
+	 * its percentage (100% in fractal mode and the same percentage
+	 * as the hist entry in graph mode). This also avoids one column level.
+	 */
+	node = rb_first(root);
+	if (node && !rb_next(node)) {
+		cnode = rb_entry(node, struct callchain_node, rb_node);
+		list_for_each_entry(chain, &cnode->val, list) {
+			/*
+			 * If we sort by symbol, the first entry is the same as
+			 * the symbol, so there is no need to print it; otherwise
+			 * it would appear twice.
+			 */
+			if (!i++ && sort__first_dimension == SORT_SYM)
+				continue;
+			if (!printed) {
+				ret += callchain__fprintf_left_margin(fp, left_margin);
+				ret += fprintf(fp, "|\n");
+				ret += callchain__fprintf_left_margin(fp, left_margin);
+				ret += fprintf(fp, "---");
+				left_margin += 3;
+				printed = true;
+			} else
+				ret += callchain__fprintf_left_margin(fp, left_margin);
 
-		if (!printed) {
-			ret += callchain__fprintf_left_margin(fp, left_margin);
-			ret += fprintf(fp, "|\n");
-			ret += callchain__fprintf_left_margin(fp, left_margin);
-			ret += fprintf(fp, "---");
+			if (chain->ms.sym)
+				ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+			else
+				ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
 
-			left_margin += 3;
-			printed = true;
-		} else
-			ret += callchain__fprintf_left_margin(fp, left_margin);
-
-		if (chain->ms.sym)
-			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
-		else
-			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
-
-		if (++entries_printed == callchain_param.print_limit)
-			break;
+			if (++entries_printed == callchain_param.print_limit)
+				break;
+		}
+		root = &cnode->rb_root;
 	}
 
-	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
-
-	return ret;
+	return __callchain__fprintf_graph(fp, root, total_samples,
+					  1, 1, left_margin);
 }
 
-static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
-				      u64 total_samples)
+static size_t __callchain__fprintf_flat(FILE *fp,
+					struct callchain_node *self,
+					u64 total_samples)
 {
 	struct callchain_list *chain;
 	size_t ret = 0;
@@ -729,7 +748,7 @@
 	if (!self)
 		return 0;
 
-	ret += callchain__fprintf_flat(fp, self->parent, total_samples);
+	ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
 
 
 	list_for_each_entry(chain, &self->val, list) {
@@ -745,44 +764,58 @@
 	return ret;
 }
 
-static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
-					    u64 total_samples, int left_margin,
-					    FILE *fp)
+static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
+				      u64 total_samples)
 {
-	struct rb_node *rb_node;
-	struct callchain_node *chain;
 	size_t ret = 0;
 	u32 entries_printed = 0;
+	struct rb_node *rb_node;
+	struct callchain_node *chain;
 
-	rb_node = rb_first(&he->sorted_chain);
+	rb_node = rb_first(self);
 	while (rb_node) {
 		double percent;
 
 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
 		percent = chain->hit * 100.0 / total_samples;
-		switch (callchain_param.mode) {
-		case CHAIN_FLAT:
-			ret += percent_color_fprintf(fp, "           %6.2f%%\n",
-						     percent);
-			ret += callchain__fprintf_flat(fp, chain, total_samples);
-			break;
-		case CHAIN_GRAPH_ABS: /* Falldown */
-		case CHAIN_GRAPH_REL:
-			ret += callchain__fprintf_graph(fp, chain, total_samples,
-							left_margin);
-		case CHAIN_NONE:
-		default:
-			break;
-		}
+
+		ret = percent_color_fprintf(fp, "           %6.2f%%\n", percent);
+		ret += __callchain__fprintf_flat(fp, chain, total_samples);
 		ret += fprintf(fp, "\n");
 		if (++entries_printed == callchain_param.print_limit)
 			break;
+
 		rb_node = rb_next(rb_node);
 	}
 
 	return ret;
 }
 
+static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
+					    u64 total_samples, int left_margin,
+					    FILE *fp)
+{
+	switch (callchain_param.mode) {
+	case CHAIN_GRAPH_REL:
+		return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
+						left_margin);
+		break;
+	case CHAIN_GRAPH_ABS:
+		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
+						left_margin);
+		break;
+	case CHAIN_FLAT:
+		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
+		break;
+	case CHAIN_NONE:
+		break;
+	default:
+		pr_err("Bad callchain mode\n");
+	}
+
+	return 0;
+}
+
 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
 {
 	struct rb_node *next = rb_first(&hists->entries);
@@ -887,9 +920,9 @@
 		diff = new_percent - old_percent;
 
 		if (fabs(diff) >= 0.01)
-			ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
+			scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
 		else
-			ret += scnprintf(bf, sizeof(bf), " ");
+			scnprintf(bf, sizeof(bf), " ");
 
 		if (sep)
 			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -898,9 +931,9 @@
 
 		if (show_displacement) {
 			if (displacement)
-				ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement);
+				scnprintf(bf, sizeof(bf), "%+4ld", displacement);
 			else
-				ret += scnprintf(bf, sizeof(bf), " ");
+				scnprintf(bf, sizeof(bf), " ");
 
 			if (sep)
 				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -1247,6 +1280,37 @@
 	}
 }
 
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+					  struct hist_entry *he)
+{
+	if (hists->symbol_filter_str != NULL &&
+	    (!he->ms.sym || strstr(he->ms.sym->name,
+				   hists->symbol_filter_str) == NULL)) {
+		he->filtered |= (1 << HIST_FILTER__SYMBOL);
+		return true;
+	}
+
+	return false;
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+	struct rb_node *nd;
+
+	hists->nr_entries = hists->stats.total_period = 0;
+	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+	hists__reset_col_len(hists);
+
+	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+		if (hists__filter_entry_by_symbol(hists, h))
+			continue;
+
+		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
+	}
+}
+
 int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
 {
 	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
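
The new symbol filter keeps an entry only when its symbol name contains
the filter string, using the same strstr() test as
hists__filter_entry_by_symbol() above.  A standalone sketch of that
match, with made-up symbol names:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *syms[] = { "schedule", "do_page_fault", "sched_clock" };
	const char *filter = "sched";
	size_t i;

	/* entries whose name does not contain the filter would be hidden */
	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		if (strstr(syms[i], filter))
			printf("kept: %s\n", syms[i]);
	return 0;
}
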
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 9413f3e..2cae9df 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -62,6 +62,7 @@
 	const struct thread	*thread_filter;
 	const struct dso	*dso_filter;
 	const char		*uid_filter_str;
+	const char		*symbol_filter_str;
 	pthread_mutex_t		lock;
 	struct events_stats	stats;
 	u64			event_stream;
@@ -107,6 +108,7 @@
 
 void hists__filter_by_dso(struct hists *hists);
 void hists__filter_by_thread(struct hists *hists);
+void hists__filter_by_symbol(struct hists *hists);
 
 u16 hists__col_len(struct hists *self, enum hist_column col);
 void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
@@ -145,6 +147,23 @@
 				  int refresh);
 #endif
 
+#ifdef NO_GTK2_SUPPORT
+static inline
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used,
+				  const char *help __used,
+				  void(*timer)(void *arg) __used,
+				  void *arg __used,
+				  int refresh __used)
+{
+	return 0;
+}
+
+#else
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
+				  void(*timer)(void *arg), void *arg,
+				  int refresh);
+#endif
+
 unsigned int hists__sort_list_width(struct hists *self);
 
 #endif	/* __PERF_HIST_H */
diff --git a/tools/perf/util/include/linux/module.h b/tools/perf/util/include/linux/export.h
similarity index 100%
rename from tools/perf/util/include/linux/module.h
rename to tools/perf/util/include/linux/export.h
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c7a6f6f..5b3a0ef 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -11,6 +11,10 @@
 #include "cache.h"
 #include "header.h"
 #include "debugfs.h"
+#include "parse-events-flex.h"
+#include "pmu.h"
+
+#define MAX_NAME_LEN 100
 
 struct event_symbol {
 	u8		type;
@@ -19,11 +23,8 @@
 	const char	*alias;
 };
 
-enum event_result {
-	EVT_FAILED,
-	EVT_HANDLED,
-	EVT_HANDLED_ALL
-};
+int parse_events_parse(struct list_head *list, struct list_head *list_tmp,
+		       int *idx);
 
 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
@@ -354,7 +355,24 @@
 	return "unknown";
 }
 
-static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
+static int add_event(struct list_head *list, int *idx,
+		     struct perf_event_attr *attr, char *name)
+{
+	struct perf_evsel *evsel;
+
+	event_attr_init(attr);
+
+	evsel = perf_evsel__new(attr, (*idx)++);
+	if (!evsel)
+		return -ENOMEM;
+
+	list_add_tail(&evsel->node, list);
+
+	evsel->name = strdup(name);
+	return 0;
+}
+
+static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
 {
 	int i, j;
 	int n, longest = -1;
@@ -362,58 +380,57 @@
 	for (i = 0; i < size; i++) {
 		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
 			n = strlen(names[i][j]);
-			if (n > longest && !strncasecmp(*str, names[i][j], n))
+			if (n > longest && !strncasecmp(str, names[i][j], n))
 				longest = n;
 		}
-		if (longest > 0) {
-			*str += longest;
+		if (longest > 0)
 			return i;
-		}
 	}
 
 	return -1;
 }
 
-static enum event_result
-parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
+int parse_events_add_cache(struct list_head *list, int *idx,
+			   char *type, char *op_result1, char *op_result2)
 {
-	const char *s = *str;
+	struct perf_event_attr attr;
+	char name[MAX_NAME_LEN];
 	int cache_type = -1, cache_op = -1, cache_result = -1;
+	char *op_result[2] = { op_result1, op_result2 };
+	int i, n;
 
-	cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
 	/*
 	 * No fallback - if we cannot get a clear cache type
 	 * then bail out:
 	 */
+	cache_type = parse_aliases(type, hw_cache,
+				   PERF_COUNT_HW_CACHE_MAX);
 	if (cache_type == -1)
-		return EVT_FAILED;
+		return -EINVAL;
 
-	while ((cache_op == -1 || cache_result == -1) && *s == '-') {
-		++s;
+	n = snprintf(name, MAX_NAME_LEN, "%s", type);
+
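+	/* Build the event name as "<type>[-<op/result>[-<op/result>]]". */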
+	for (i = 0; (i < 2) && (op_result[i]); i++) {
+		char *str = op_result[i];
+
+		snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
 
 		if (cache_op == -1) {
-			cache_op = parse_aliases(&s, hw_cache_op,
-						PERF_COUNT_HW_CACHE_OP_MAX);
+			cache_op = parse_aliases(str, hw_cache_op,
+						 PERF_COUNT_HW_CACHE_OP_MAX);
 			if (cache_op >= 0) {
 				if (!is_cache_op_valid(cache_type, cache_op))
-					return EVT_FAILED;
+					return -EINVAL;
 				continue;
 			}
 		}
 
 		if (cache_result == -1) {
-			cache_result = parse_aliases(&s, hw_cache_result,
+			cache_result = parse_aliases(str, hw_cache_result,
 						PERF_COUNT_HW_CACHE_RESULT_MAX);
 			if (cache_result >= 0)
 				continue;
 		}
-
-		/*
-		 * Can't parse this as a cache op or result, so back up
-		 * to the '-'.
-		 */
-		--s;
-		break;
 	}
 
 	/*
@@ -428,20 +445,17 @@
 	if (cache_result == -1)
 		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
 
-	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
-	attr->type = PERF_TYPE_HW_CACHE;
-
-	*str = s;
-	return EVT_HANDLED;
+	memset(&attr, 0, sizeof(attr));
+	attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
+	attr.type = PERF_TYPE_HW_CACHE;
+	return add_event(list, idx, &attr, name);
 }
 
-static enum event_result
-parse_single_tracepoint_event(char *sys_name,
-			      const char *evt_name,
-			      unsigned int evt_length,
-			      struct perf_event_attr *attr,
-			      const char **strp)
+static int add_tracepoint(struct list_head *list, int *idx,
+			  char *sys_name, char *evt_name)
 {
+	struct perf_event_attr attr;
+	char name[MAX_NAME_LEN];
 	char evt_path[MAXPATHLEN];
 	char id_buf[4];
 	u64 id;
@@ -452,130 +466,80 @@
 
 	fd = open(evt_path, O_RDONLY);
 	if (fd < 0)
-		return EVT_FAILED;
+		return -1;
 
 	if (read(fd, id_buf, sizeof(id_buf)) < 0) {
 		close(fd);
-		return EVT_FAILED;
+		return -1;
 	}
 
 	close(fd);
 	id = atoll(id_buf);
-	attr->config = id;
-	attr->type = PERF_TYPE_TRACEPOINT;
-	*strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */
 
-	attr->sample_type |= PERF_SAMPLE_RAW;
-	attr->sample_type |= PERF_SAMPLE_TIME;
-	attr->sample_type |= PERF_SAMPLE_CPU;
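+	/*
+	 * The id read from the tracepoint's debugfs 'id' file becomes the
+	 * attr.config of a PERF_TYPE_TRACEPOINT event.
+	 */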
+	memset(&attr, 0, sizeof(attr));
+	attr.config = id;
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type |= PERF_SAMPLE_RAW;
+	attr.sample_type |= PERF_SAMPLE_TIME;
+	attr.sample_type |= PERF_SAMPLE_CPU;
+	attr.sample_period = 1;
 
-	attr->sample_period = 1;
-
-
-	return EVT_HANDLED;
+	snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
+	return add_event(list, idx, &attr, name);
 }
 
-/* sys + ':' + event + ':' + flags*/
-#define MAX_EVOPT_LEN	(MAX_EVENT_LENGTH * 2 + 2 + 128)
-static enum event_result
-parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name,
-				const char *evt_exp, char *flags)
+static int add_tracepoint_multi(struct list_head *list, int *idx,
+				char *sys_name, char *evt_name)
 {
 	char evt_path[MAXPATHLEN];
 	struct dirent *evt_ent;
 	DIR *evt_dir;
+	int ret = 0;
 
 	snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
 	evt_dir = opendir(evt_path);
-
 	if (!evt_dir) {
 		perror("Can't open event dir");
-		return EVT_FAILED;
+		return -1;
 	}
 
-	while ((evt_ent = readdir(evt_dir))) {
-		char event_opt[MAX_EVOPT_LEN + 1];
-		int len;
-
+	while (!ret && (evt_ent = readdir(evt_dir))) {
 		if (!strcmp(evt_ent->d_name, ".")
 		    || !strcmp(evt_ent->d_name, "..")
 		    || !strcmp(evt_ent->d_name, "enable")
 		    || !strcmp(evt_ent->d_name, "filter"))
 			continue;
 
-		if (!strglobmatch(evt_ent->d_name, evt_exp))
+		if (!strglobmatch(evt_ent->d_name, evt_name))
 			continue;
 
-		len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
-			       evt_ent->d_name, flags ? ":" : "",
-			       flags ?: "");
-		if (len < 0)
-			return EVT_FAILED;
-
-		if (parse_events(evlist, event_opt, 0))
-			return EVT_FAILED;
+		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
 	}
 
-	return EVT_HANDLED_ALL;
+	return ret;
 }
 
-static enum event_result
-parse_tracepoint_event(struct perf_evlist *evlist, const char **strp,
-		       struct perf_event_attr *attr)
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+				char *sys, char *event)
 {
-	const char *evt_name;
-	char *flags = NULL, *comma_loc;
-	char sys_name[MAX_EVENT_LENGTH];
-	unsigned int sys_length, evt_length;
+	int ret;
 
-	if (debugfs_valid_mountpoint(tracing_events_path))
-		return 0;
+	ret = debugfs_valid_mountpoint(tracing_events_path);
+	if (ret)
+		return ret;
 
-	evt_name = strchr(*strp, ':');
-	if (!evt_name)
-		return EVT_FAILED;
-
-	sys_length = evt_name - *strp;
-	if (sys_length >= MAX_EVENT_LENGTH)
-		return 0;
-
-	strncpy(sys_name, *strp, sys_length);
-	sys_name[sys_length] = '\0';
-	evt_name = evt_name + 1;
-
-	comma_loc = strchr(evt_name, ',');
-	if (comma_loc) {
-		/* take the event name up to the comma */
-		evt_name = strndup(evt_name, comma_loc - evt_name);
-	}
-	flags = strchr(evt_name, ':');
-	if (flags) {
-		/* split it out: */
-		evt_name = strndup(evt_name, flags - evt_name);
-		flags++;
-	}
-
-	evt_length = strlen(evt_name);
-	if (evt_length >= MAX_EVENT_LENGTH)
-		return EVT_FAILED;
-	if (strpbrk(evt_name, "*?")) {
-		*strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
-		return parse_multiple_tracepoint_event(evlist, sys_name,
-						       evt_name, flags);
-	} else {
-		return parse_single_tracepoint_event(sys_name, evt_name,
-						     evt_length, attr, strp);
-	}
+	return strpbrk(event, "*?") ?
+	       add_tracepoint_multi(list, idx, sys, event) :
+	       add_tracepoint(list, idx, sys, event);
 }
 
-static enum event_result
-parse_breakpoint_type(const char *type, const char **strp,
-		      struct perf_event_attr *attr)
+static int
+parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
 {
 	int i;
 
 	for (i = 0; i < 3; i++) {
-		if (!type[i])
+		if (!type || !type[i])
 			break;
 
 		switch (type[i]) {
@@ -589,164 +553,146 @@
 			attr->bp_type |= HW_BREAKPOINT_X;
 			break;
 		default:
-			return EVT_FAILED;
+			return -EINVAL;
 		}
 	}
+
 	if (!attr->bp_type) /* Default */
 		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
 
-	*strp = type + i;
-
-	return EVT_HANDLED;
+	return 0;
 }
 
-static enum event_result
-parse_breakpoint_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+				void *ptr, char *type)
 {
-	const char *target;
-	const char *type;
-	char *endaddr;
-	u64 addr;
-	enum event_result err;
+	struct perf_event_attr attr;
+	char name[MAX_NAME_LEN];
 
-	target = strchr(*strp, ':');
-	if (!target)
-		return EVT_FAILED;
+	memset(&attr, 0, sizeof(attr));
+	attr.bp_addr = (unsigned long) ptr;
 
-	if (strncmp(*strp, "mem", target - *strp) != 0)
-		return EVT_FAILED;
-
-	target++;
-
-	addr = strtoull(target, &endaddr, 0);
-	if (target == endaddr)
-		return EVT_FAILED;
-
-	attr->bp_addr = addr;
-	*strp = endaddr;
-
-	type = strchr(target, ':');
-
-	/* If no type is defined, just rw as default */
-	if (!type) {
-		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
-	} else {
-		err = parse_breakpoint_type(++type, strp, attr);
-		if (err == EVT_FAILED)
-			return EVT_FAILED;
-	}
+	if (parse_breakpoint_type(type, &attr))
+		return -EINVAL;
 
 	/*
 	 * We should find a nice way to override the access length
 	 * Provide some defaults for now
 	 */
-	if (attr->bp_type == HW_BREAKPOINT_X)
-		attr->bp_len = sizeof(long);
+	if (attr.bp_type == HW_BREAKPOINT_X)
+		attr.bp_len = sizeof(long);
 	else
-		attr->bp_len = HW_BREAKPOINT_LEN_4;
+		attr.bp_len = HW_BREAKPOINT_LEN_4;
 
-	attr->type = PERF_TYPE_BREAKPOINT;
+	attr.type = PERF_TYPE_BREAKPOINT;
 
-	return EVT_HANDLED;
+	snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
+	return add_event(list, idx, &attr, name);
 }
 
-static int check_events(const char *str, unsigned int i)
+static int config_term(struct perf_event_attr *attr,
+		       struct parse_events__term *term)
 {
-	int n;
-
-	n = strlen(event_symbols[i].symbol);
-	if (!strncasecmp(str, event_symbols[i].symbol, n))
-		return n;
-
-	n = strlen(event_symbols[i].alias);
-	if (n) {
-		if (!strncasecmp(str, event_symbols[i].alias, n))
-			return n;
+	switch (term->type) {
+	case PARSE_EVENTS__TERM_TYPE_CONFIG:
+		attr->config = term->val.num;
+		break;
+	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+		attr->config1 = term->val.num;
+		break;
+	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+		attr->config2 = term->val.num;
+		break;
+	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+		attr->sample_period = term->val.num;
+		break;
+	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+		/*
+		 * TODO uncomment when the field is available
+		 * attr->branch_sample_type = term->val.num;
+		 */
+		break;
+	default:
+		return -EINVAL;
 	}
+	return 0;
+}
+
+static int config_attr(struct perf_event_attr *attr,
+		       struct list_head *head, int fail)
+{
+	struct parse_events__term *term;
+
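+	/*
+	 * When 'fail' is zero, terms that config_term() does not recognize
+	 * are silently skipped; parse_events_add_pmu() relies on this and
+	 * lets perf_pmu__config() handle the PMU specific terms later.
+	 */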
+	list_for_each_entry(term, head, list)
+		if (config_term(attr, term) && fail)
+			return -EINVAL;
 
 	return 0;
 }
 
-static enum event_result
-parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_numeric(struct list_head *list, int *idx,
+			     unsigned long type, unsigned long config,
+			     struct list_head *head_config)
 {
-	const char *str = *strp;
-	unsigned int i;
-	int n;
+	struct perf_event_attr attr;
 
-	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
-		n = check_events(str, i);
-		if (n > 0) {
-			attr->type = event_symbols[i].type;
-			attr->config = event_symbols[i].config;
-			*strp = str + n;
-			return EVT_HANDLED;
-		}
-	}
-	return EVT_FAILED;
+	memset(&attr, 0, sizeof(attr));
+	attr.type = type;
+	attr.config = config;
+
+	if (head_config &&
+	    config_attr(&attr, head_config, 1))
+		return -EINVAL;
+
+	return add_event(list, idx, &attr,
+			 (char *) __event_name(type, config));
 }
 
-static enum event_result
-parse_raw_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_pmu(struct list_head *list, int *idx,
+			 char *name, struct list_head *head_config)
 {
-	const char *str = *strp;
-	u64 config;
-	int n;
+	struct perf_event_attr attr;
+	struct perf_pmu *pmu;
 
-	if (*str != 'r')
-		return EVT_FAILED;
-	n = hex2u64(str + 1, &config);
-	if (n > 0) {
-		const char *end = str + n + 1;
-		if (*end != '\0' && *end != ',' && *end != ':')
-			return EVT_FAILED;
+	pmu = perf_pmu__find(name);
+	if (!pmu)
+		return -EINVAL;
 
-		*strp = end;
-		attr->type = PERF_TYPE_RAW;
-		attr->config = config;
-		return EVT_HANDLED;
-	}
-	return EVT_FAILED;
+	memset(&attr, 0, sizeof(attr));
+
+	/*
+	 * Configure hardcoded terms first, no need to check
+	 * return value when called with fail == 0 ;)
+	 */
+	config_attr(&attr, head_config, 0);
+
+	if (perf_pmu__config(pmu, &attr, head_config))
+		return -EINVAL;
+
+	return add_event(list, idx, &attr, (char *) "pmu");
 }
 
-static enum event_result
-parse_numeric_event(const char **strp, struct perf_event_attr *attr)
+void parse_events_update_lists(struct list_head *list_event,
+			       struct list_head *list_all)
 {
-	const char *str = *strp;
-	char *endp;
-	unsigned long type;
-	u64 config;
-
-	type = strtoul(str, &endp, 0);
-	if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
-		str = endp + 1;
-		config = strtoul(str, &endp, 0);
-		if (endp > str) {
-			attr->type = type;
-			attr->config = config;
-			*strp = endp;
-			return EVT_HANDLED;
-		}
-	}
-	return EVT_FAILED;
+	/*
+	 * Called for a single event definition. Update the 'all event'
+	 * list, and reinit the 'single event' list for the next event
+	 * definition.
+	 */
+	list_splice_tail(list_event, list_all);
+	INIT_LIST_HEAD(list_event);
 }
 
-static int
-parse_event_modifier(const char **strp, struct perf_event_attr *attr)
+int parse_events_modifier(struct list_head *list, char *str)
 {
-	const char *str = *strp;
+	struct perf_evsel *evsel;
 	int exclude = 0, exclude_GH = 0;
 	int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;
 
-	if (!*str)
+	if (str == NULL)
 		return 0;
 
-	if (*str == ',')
-		return 0;
-
-	if (*str++ != ':')
-		return -1;
-
 	while (*str) {
 		if (*str == 'u') {
 			if (!exclude)
@@ -775,111 +721,62 @@
 
 		++str;
 	}
-	if (str < *strp + 2)
-		return -1;
 
-	*strp = str;
+	/*
+	 * precise ip:
+	 *
+	 *  0 - SAMPLE_IP can have arbitrary skid
+	 *  1 - SAMPLE_IP must have constant skid
+	 *  2 - SAMPLE_IP requested to have 0 skid
+	 *  3 - SAMPLE_IP must have 0 skid
+	 *
+	 *  See also PERF_RECORD_MISC_EXACT_IP
+	 */
+	if (precise > 3)
+		return -EINVAL;
 
-	attr->exclude_user   = eu;
-	attr->exclude_kernel = ek;
-	attr->exclude_hv     = eh;
-	attr->precise_ip     = precise;
-	attr->exclude_host   = eH;
-	attr->exclude_guest  = eG;
+	list_for_each_entry(evsel, list, node) {
+		evsel->attr.exclude_user   = eu;
+		evsel->attr.exclude_kernel = ek;
+		evsel->attr.exclude_hv     = eh;
+		evsel->attr.precise_ip     = precise;
+		evsel->attr.exclude_host   = eH;
+		evsel->attr.exclude_guest  = eG;
+	}
 
 	return 0;
 }
 
-/*
- * Each event can have multiple symbolic names.
- * Symbolic names are (almost) exactly matched.
- */
-static enum event_result
-parse_event_symbols(struct perf_evlist *evlist, const char **str,
-		    struct perf_event_attr *attr)
+int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
 {
-	enum event_result ret;
+	LIST_HEAD(list);
+	LIST_HEAD(list_tmp);
+	YY_BUFFER_STATE buffer;
+	int ret, idx = evlist->nr_entries;
 
-	ret = parse_tracepoint_event(evlist, str, attr);
-	if (ret != EVT_FAILED)
-		goto modifier;
+	buffer = parse_events__scan_string(str);
 
-	ret = parse_raw_event(str, attr);
-	if (ret != EVT_FAILED)
-		goto modifier;
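+	/*
+	 * The flex scanner reads from 'buffer'; parse_events_parse() runs
+	 * the bison grammar, collecting new evsels on 'list' and advancing
+	 * 'idx' for each one added.
+	 */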
+	ret = parse_events_parse(&list, &list_tmp, &idx);
 
-	ret = parse_numeric_event(str, attr);
-	if (ret != EVT_FAILED)
-		goto modifier;
+	parse_events__flush_buffer(buffer);
+	parse_events__delete_buffer(buffer);
 
-	ret = parse_symbolic_event(str, attr);
-	if (ret != EVT_FAILED)
-		goto modifier;
+	if (!ret) {
+		int entries = idx - evlist->nr_entries;
+		perf_evlist__splice_list_tail(evlist, &list, entries);
+		return 0;
+	}
 
-	ret = parse_generic_hw_event(str, attr);
-	if (ret != EVT_FAILED)
-		goto modifier;
-
-	ret = parse_breakpoint_event(str, attr);
-	if (ret != EVT_FAILED)
-		goto modifier;
-
-	fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
+	/*
+	 * There are 2 users - builtin-record and builtin-test objects.
+	 * Both call perf_evlist__delete in case of error, so we don't
+	 * need to bother.
+	 */
+	fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
 	fprintf(stderr, "Run 'perf list' for a list of valid events\n");
-	return EVT_FAILED;
-
-modifier:
-	if (parse_event_modifier(str, attr) < 0) {
-		fprintf(stderr, "invalid event modifier: '%s'\n", *str);
-		fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n");
-
-		return EVT_FAILED;
-	}
-
 	return ret;
 }
 
-int parse_events(struct perf_evlist *evlist , const char *str, int unset __used)
-{
-	struct perf_event_attr attr;
-	enum event_result ret;
-	const char *ostr;
-
-	for (;;) {
-		ostr = str;
-		memset(&attr, 0, sizeof(attr));
-		event_attr_init(&attr);
-		ret = parse_event_symbols(evlist, &str, &attr);
-		if (ret == EVT_FAILED)
-			return -1;
-
-		if (!(*str == 0 || *str == ',' || isspace(*str)))
-			return -1;
-
-		if (ret != EVT_HANDLED_ALL) {
-			struct perf_evsel *evsel;
-			evsel = perf_evsel__new(&attr, evlist->nr_entries);
-			if (evsel == NULL)
-				return -1;
-			perf_evlist__add(evlist, evsel);
-
-			evsel->name = calloc(str - ostr + 1, 1);
-			if (!evsel->name)
-				return -1;
-			strncpy(evsel->name, ostr, str - ostr);
-		}
-
-		if (*str == 0)
-			break;
-		if (*str == ',')
-			++str;
-		while (isspace(*str))
-			++str;
-	}
-
-	return 0;
-}
-
 int parse_events_option(const struct option *opt, const char *str,
 			int unset __used)
 {
@@ -1052,8 +949,6 @@
 	return printed;
 }
 
-#define MAX_NAME_LEN 100
-
 /*
  * Print the help text for the event symbols:
  */
@@ -1102,8 +997,12 @@
 
 	printf("\n");
 	printf("  %-50s [%s]\n",
-		"rNNN (see 'perf list --help' on how to encode it)",
+	       "rNNN",
 	       event_type_descriptors[PERF_TYPE_RAW]);
+	printf("  %-50s [%s]\n",
+	       "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+	       event_type_descriptors[PERF_TYPE_RAW]);
+	printf("   (see 'perf list --help' on how to encode it)\n");
 	printf("\n");
 
 	printf("  %-50s [%s]\n",
@@ -1113,3 +1012,51 @@
 
 	print_tracepoint_events(NULL, NULL);
 }
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term)
+{
+	return term->type <= PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX;
+}
+
+int parse_events__new_term(struct parse_events__term **_term, int type,
+			   char *config, char *str, long num)
+{
+	struct parse_events__term *term;
+
+	term = zalloc(sizeof(*term));
+	if (!term)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&term->list);
+	term->type = type;
+	term->config = config;
+
+	switch (type) {
+	case PARSE_EVENTS__TERM_TYPE_CONFIG:
+	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+	case PARSE_EVENTS__TERM_TYPE_NUM:
+		term->val.num = num;
+		break;
+	case PARSE_EVENTS__TERM_TYPE_STR:
+		term->val.str = str;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*_term = term;
+	return 0;
+}
+
+void parse_events__free_terms(struct list_head *terms)
+{
+	struct parse_events__term *term, *h;
+
+	list_for_each_entry_safe(term, h, terms, list)
+		free(term);
+
+	free(terms);
+}
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 7e0cbe7..ca069f8 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -33,6 +33,55 @@
 
 #define EVENTS_HELP_MAX (128*1024)
 
+enum {
+	PARSE_EVENTS__TERM_TYPE_CONFIG,
+	PARSE_EVENTS__TERM_TYPE_CONFIG1,
+	PARSE_EVENTS__TERM_TYPE_CONFIG2,
+	PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
+	PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+	PARSE_EVENTS__TERM_TYPE_NUM,
+	PARSE_EVENTS__TERM_TYPE_STR,
+
+	PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX =
+		PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+};
+
+struct parse_events__term {
+	char *config;
+	union {
+		char *str;
+		long  num;
+	} val;
+	int type;
+
+	struct list_head list;
+};
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term);
+int parse_events__new_term(struct parse_events__term **term, int type,
+			   char *config, char *str, long num);
+void parse_events__free_terms(struct list_head *terms);
+int parse_events_modifier(struct list_head *list __used, char *str __used);
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+				char *sys, char *event);
+int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
+			 unsigned long config1, unsigned long config2,
+			 char *mod);
+int parse_events_add_numeric(struct list_head *list, int *idx,
+			     unsigned long type, unsigned long config,
+			     struct list_head *head_config);
+int parse_events_add_cache(struct list_head *list, int *idx,
+			   char *type, char *op_result1, char *op_result2);
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+				void *ptr, char *type);
+int parse_events_add_pmu(struct list_head *list, int *idx,
+			 char *pmu, struct list_head *head_config);
+void parse_events_update_lists(struct list_head *list_event,
+			       struct list_head *list_all);
+void parse_events_error(struct list_head *list_all,
+			struct list_head *list_event,
+			int *idx, char const *msg);
+
 void print_events(const char *event_glob);
 void print_events_type(u8 type);
 void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
new file mode 100644
index 0000000..05d766e
--- /dev/null
+++ b/tools/perf/util/parse-events.l
@@ -0,0 +1,127 @@
+
+%option prefix="parse_events_"
+
+%{
+#include <errno.h>
+#include "../perf.h"
+#include "parse-events-bison.h"
+#include "parse-events.h"
+
+static int __value(char *str, int base, int token)
+{
+	long num;
+
+	errno = 0;
+	num = strtoul(str, NULL, base);
+	if (errno)
+		return PE_ERROR;
+
+	parse_events_lval.num = num;
+	return token;
+}
+
+static int value(int base)
+{
+	return __value(parse_events_text, base, PE_VALUE);
+}
+
+static int raw(void)
+{
+	return __value(parse_events_text + 1, 16, PE_RAW);
+}
+
+static int str(int token)
+{
+	parse_events_lval.str = strdup(parse_events_text);
+	return token;
+}
+
+static int sym(int type, int config)
+{
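+	/*
+	 * Pack the event type into the high 16 bits and the config value
+	 * into the low bits; the event_legacy_symbol grammar rule unpacks
+	 * them again.
+	 */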
+	parse_events_lval.num = (type << 16) + config;
+	return PE_VALUE_SYM;
+}
+
+static int term(int type)
+{
+	parse_events_lval.num = type;
+	return PE_TERM;
+}
+
+%}
+
+num_dec		[0-9]+
+num_hex		0x[a-fA-F0-9]+
+num_raw_hex	[a-fA-F0-9]+
+name		[a-zA-Z_*?][a-zA-Z0-9_*?]*
+modifier_event	[ukhp]{1,5}
+modifier_bp	[rwx]
+
+%%
+cpu-cycles|cycles				{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend	{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend	{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions					{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references				{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses					{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches			{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses					{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles					{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles					{ return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock					{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock					{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults				{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults					{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults					{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs				{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations			{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults				{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults				{ return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+
+L1-dcache|l1-d|l1d|L1-data		|
+L1-icache|l1-i|l1i|L1-instruction	|
+LLC|L2					|
+dTLB|d-tlb|Data-TLB			|
+iTLB|i-tlb|Instruction-TLB		|
+branch|branches|bpu|btb|bpc		|
+node					{ return str(PE_NAME_CACHE_TYPE); }
+
+load|loads|read				|
+store|stores|write			|
+prefetch|prefetches			|
+speculative-read|speculative-load	|
+refs|Reference|ops|access		|
+misses|miss				{ return str(PE_NAME_CACHE_OP_RESULT); }
+
+	/*
+	 * These are the hardcoded event config term names that can be
+	 * specified within the xxx/.../ syntax. So far we don't clash with
+	 * other names, so we can put them here directly. In case we have a
+	 * conflict in the future, this needs to go into a '//' condition
+	 * block.
+	 */
+config			{ return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1			{ return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2			{ return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+period			{ return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type		{ return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+
+mem:			{ return PE_PREFIX_MEM; }
+r{num_raw_hex}		{ return raw(); }
+{num_dec}		{ return value(10); }
+{num_hex}		{ return value(16); }
+
+{modifier_event}	{ return str(PE_MODIFIER_EVENT); }
+{modifier_bp}		{ return str(PE_MODIFIER_BP); }
+{name}			{ return str(PE_NAME); }
+"/"			{ return '/'; }
+-			{ return '-'; }
+,			{ return ','; }
+:			{ return ':'; }
+=			{ return '='; }
+
+%%
+
+int parse_events_wrap(void)
+{
+	return 1;
+}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
new file mode 100644
index 0000000..d9637da
--- /dev/null
+++ b/tools/perf/util/parse-events.y
@@ -0,0 +1,229 @@
+
+%name-prefix "parse_events_"
+%parse-param {struct list_head *list_all}
+%parse-param {struct list_head *list_event}
+%parse-param {int *idx}
+
+%{
+
+#define YYDEBUG 1
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include "types.h"
+#include "util.h"
+#include "parse-events.h"
+
+extern int parse_events_lex (void);
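+
+/*
+ * Parser parameters: list_all collects every event parsed so far,
+ * list_event holds the events produced by the definition currently
+ * being parsed (merged into list_all by parse_events_update_lists()),
+ * and idx is the running evsel index.
+ */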
+
+#define ABORT_ON(val) \
+do { \
+	if (val) \
+		YYABORT; \
+} while (0)
+
+%}
+
+%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
+%token PE_NAME
+%token PE_MODIFIER_EVENT PE_MODIFIER_BP
+%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
+%token PE_PREFIX_MEM PE_PREFIX_RAW
+%token PE_ERROR
+%type <num> PE_VALUE
+%type <num> PE_VALUE_SYM
+%type <num> PE_RAW
+%type <num> PE_TERM
+%type <str> PE_NAME
+%type <str> PE_NAME_CACHE_TYPE
+%type <str> PE_NAME_CACHE_OP_RESULT
+%type <str> PE_MODIFIER_EVENT
+%type <str> PE_MODIFIER_BP
+%type <head> event_config
+%type <term> event_term
+
+%union
+{
+	char *str;
+	unsigned long num;
+	struct list_head *head;
+	struct parse_events__term *term;
+}
+%%
+
+events:
+events ',' event | event
+
+event:
+event_def PE_MODIFIER_EVENT
+{
+	/*
+	 * Apply modifier on all events added by single event definition
+	 * (there could be more events added for multiple tracepoint
+	 * definitions via '*?'.
+	 */
+	ABORT_ON(parse_events_modifier(list_event, $2));
+	parse_events_update_lists(list_event, list_all);
+}
+|
+event_def
+{
+	parse_events_update_lists(list_event, list_all);
+}
+
+event_def: event_pmu |
+	   event_legacy_symbol |
+	   event_legacy_cache sep_dc |
+	   event_legacy_mem |
+	   event_legacy_tracepoint sep_dc |
+	   event_legacy_numeric sep_dc |
+	   event_legacy_raw sep_dc
+
+event_pmu:
+PE_NAME '/' event_config '/'
+{
+	ABORT_ON(parse_events_add_pmu(list_event, idx, $1, $3));
+	parse_events__free_terms($3);
+}
+
+event_legacy_symbol:
+PE_VALUE_SYM '/' event_config '/'
+{
+	int type = $1 >> 16;
+	int config = $1 & 255;
+
+	ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, $3));
+	parse_events__free_terms($3);
+}
+|
+PE_VALUE_SYM sep_slash_dc
+{
+	int type = $1 >> 16;
+	int config = $1 & 255;
+
+	ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, NULL));
+}
+
+event_legacy_cache:
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
+{
+	ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, $5));
+}
+|
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
+{
+	ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, NULL));
+}
+|
+PE_NAME_CACHE_TYPE
+{
+	ABORT_ON(parse_events_add_cache(list_event, idx, $1, NULL, NULL));
+}
+
+event_legacy_mem:
+PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
+{
+	ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, $4));
+}
+|
+PE_PREFIX_MEM PE_VALUE sep_dc
+{
+	ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, NULL));
+}
+
+event_legacy_tracepoint:
+PE_NAME ':' PE_NAME
+{
+	ABORT_ON(parse_events_add_tracepoint(list_event, idx, $1, $3));
+}
+
+event_legacy_numeric:
+PE_VALUE ':' PE_VALUE
+{
+	ABORT_ON(parse_events_add_numeric(list_event, idx, $1, $3, NULL));
+}
+
+event_legacy_raw:
+PE_RAW
+{
+	ABORT_ON(parse_events_add_numeric(list_event, idx, PERF_TYPE_RAW, $1, NULL));
+}
+
+event_config:
+event_config ',' event_term
+{
+	struct list_head *head = $1;
+	struct parse_events__term *term = $3;
+
+	ABORT_ON(!head);
+	list_add_tail(&term->list, head);
+	$$ = $1;
+}
+|
+event_term
+{
+	struct list_head *head = malloc(sizeof(*head));
+	struct parse_events__term *term = $1;
+
+	ABORT_ON(!head);
+	INIT_LIST_HEAD(head);
+	list_add_tail(&term->list, head);
+	$$ = head;
+}
+
+event_term:
+PE_NAME '=' PE_NAME
+{
+	struct parse_events__term *term;
+
+	ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_STR,
+		 $1, $3, 0));
+	$$ = term;
+}
+|
+PE_NAME '=' PE_VALUE
+{
+	struct parse_events__term *term;
+
+	ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+		 $1, NULL, $3));
+	$$ = term;
+}
+|
+PE_NAME
+{
+	struct parse_events__term *term;
+
+	ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+		 $1, NULL, 1));
+	$$ = term;
+}
+|
+PE_TERM '=' PE_VALUE
+{
+	struct parse_events__term *term;
+
+	ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, $3));
+	$$ = term;
+}
+|
+PE_TERM
+{
+	struct parse_events__term *term;
+
+	ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, 1));
+	$$ = term;
+}
+
+sep_dc: ':' |
+
+sep_slash_dc: '/' | ':' |
+
+%%
+
+void parse_events_error(struct list_head *list_all __used,
+			struct list_head *list_event __used,
+			int *idx __used,
+			char const *msg __used)
+{
+}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
new file mode 100644
index 0000000..cb08a11
--- /dev/null
+++ b/tools/perf/util/pmu.c
@@ -0,0 +1,469 @@
+
+#include <linux/list.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <dirent.h>
+#include "sysfs.h"
+#include "util.h"
+#include "pmu.h"
+#include "parse-events.h"
+
+int perf_pmu_parse(struct list_head *list, char *name);
+extern FILE *perf_pmu_in;
+
+static LIST_HEAD(pmus);
+
+/*
+ * Parse & process all the sysfs attributes located under
+ * the directory specified by the 'dir' parameter.
+ */
+static int pmu_format_parse(char *dir, struct list_head *head)
+{
+	struct dirent *evt_ent;
+	DIR *format_dir;
+	int ret = 0;
+
+	format_dir = opendir(dir);
+	if (!format_dir)
+		return -EINVAL;
+
+	while (!ret && (evt_ent = readdir(format_dir))) {
+		char path[PATH_MAX];
+		char *name = evt_ent->d_name;
+		FILE *file;
+
+		if (!strcmp(name, ".") || !strcmp(name, ".."))
+			continue;
+
+		snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+		ret = -EINVAL;
+		file = fopen(path, "r");
+		if (!file)
+			break;
+
+		perf_pmu_in = file;
+		ret = perf_pmu_parse(head, name);
+		fclose(file);
+	}
+
+	closedir(format_dir);
+	return ret;
+}
+
+/*
+ * Reading/parsing the default pmu format definition, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
+ */
+static int pmu_format(char *name, struct list_head *format)
+{
+	struct stat st;
+	char path[PATH_MAX];
+	const char *sysfs;
+
+	sysfs = sysfs_find_mountpoint();
+	if (!sysfs)
+		return -1;
+
+	snprintf(path, PATH_MAX,
+		 "%s/bus/event_source/devices/%s/format", sysfs, name);
+
+	if (stat(path, &st) < 0)
+		return -1;
+
+	if (pmu_format_parse(path, format))
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Reading/parsing the default pmu type value, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
+ */
+static int pmu_type(char *name, __u32 *type)
+{
+	struct stat st;
+	char path[PATH_MAX];
+	const char *sysfs;
+	FILE *file;
+	int ret = 0;
+
+	sysfs = sysfs_find_mountpoint();
+	if (!sysfs)
+		return -1;
+
+	snprintf(path, PATH_MAX,
+		 "%s/bus/event_source/devices/%s/type", sysfs, name);
+
+	if (stat(path, &st) < 0)
+		return -1;
+
+	file = fopen(path, "r");
+	if (!file)
+		return -EINVAL;
+
+	if (1 != fscanf(file, "%u", type))
+		ret = -1;
+
+	fclose(file);
+	return ret;
+}
+
+static struct perf_pmu *pmu_lookup(char *name)
+{
+	struct perf_pmu *pmu;
+	LIST_HEAD(format);
+	__u32 type;
+
+	/*
+	 * The pmu data we store & need consists of the pmu
+	 * type value and format definitions. Load both right
+	 * now.
+	 */
+	if (pmu_format(name, &format))
+		return NULL;
+
+	if (pmu_type(name, &type))
+		return NULL;
+
+	pmu = zalloc(sizeof(*pmu));
+	if (!pmu)
+		return NULL;
+
+	INIT_LIST_HEAD(&pmu->format);
+	list_splice(&format, &pmu->format);
+	pmu->name = strdup(name);
+	pmu->type = type;
+	return pmu;
+}
+
+static struct perf_pmu *pmu_find(char *name)
+{
+	struct perf_pmu *pmu;
+
+	list_for_each_entry(pmu, &pmus, list)
+		if (!strcmp(pmu->name, name))
+			return pmu;
+
+	return NULL;
+}
+
+struct perf_pmu *perf_pmu__find(char *name)
+{
+	struct perf_pmu *pmu;
+
+	/*
+	 * Once a PMU is loaded it stays in the list, so we avoid reading
+	 * and parsing its format definitions more than once.
+	 */
+	pmu = pmu_find(name);
+	if (pmu)
+		return pmu;
+
+	return pmu_lookup(name);
+}
+
+static struct perf_pmu__format*
+pmu_find_format(struct list_head *formats, char *name)
+{
+	struct perf_pmu__format *format;
+
+	list_for_each_entry(format, formats, list)
+		if (!strcmp(format->name, name))
+			return format;
+
+	return NULL;
+}
+
+/*
+ * Returns a value based on the format definition (format parameter)
+ * and the unformatted value (value parameter).
+ *
+ * TODO maybe optimize a little ;)
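+ *
+ * Example: with format bits 0-1,62-63 an input value of 0xf maps bits
+ * 0,1,2,3 of the value onto bits 0,1,62,63 of the result, i.e.
+ * 0xc000000000000003.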
+ */
+static __u64 pmu_format_value(unsigned long *format, __u64 value)
+{
+	unsigned long fbit, vbit;
+	__u64 v = 0;
+
+	for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
+
+		if (!test_bit(fbit, format))
+			continue;
+
+		if (!(value & (1llu << vbit++)))
+			continue;
+
+		v |= (1llu << fbit);
+	}
+
+	return v;
+}
+
+/*
+ * Set up one of the config[12] attr members based on the
+ * user input data - the 'term' parameter.
+ */
+static int pmu_config_term(struct list_head *formats,
+			   struct perf_event_attr *attr,
+			   struct parse_events__term *term)
+{
+	struct perf_pmu__format *format;
+	__u64 *vp;
+
+	/*
+	 * Only hardcoded and numerical terms are supported.
+	 * Hardcoded terms should already be in, so nothing
+	 * needs to be done for them.
+	 */
+	if (parse_events__is_hardcoded_term(term))
+		return 0;
+
+	if (term->type != PARSE_EVENTS__TERM_TYPE_NUM)
+		return -EINVAL;
+
+	format = pmu_find_format(formats, term->config);
+	if (!format)
+		return -EINVAL;
+
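+	/*
+	 * Route the term value into whichever config word the format
+	 * definition names (config, config1 or config2); pmu_format_value()
+	 * then scatters its bits according to the format's bitmask.
+	 */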
+	switch (format->value) {
+	case PERF_PMU_FORMAT_VALUE_CONFIG:
+		vp = &attr->config;
+		break;
+	case PERF_PMU_FORMAT_VALUE_CONFIG1:
+		vp = &attr->config1;
+		break;
+	case PERF_PMU_FORMAT_VALUE_CONFIG2:
+		vp = &attr->config2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*vp |= pmu_format_value(format->bits, term->val.num);
+	return 0;
+}
+
+static int pmu_config(struct list_head *formats, struct perf_event_attr *attr,
+		      struct list_head *head_terms)
+{
+	struct parse_events__term *term, *h;
+
+	list_for_each_entry_safe(term, h, head_terms, list)
+		if (pmu_config_term(formats, attr, term))
+			return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Configures the event's 'attr' parameter based on:
+ * 1) the user's input - specified in the terms parameter
+ * 2) the pmu format definitions - specified by the pmu parameter
+ */
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+		     struct list_head *head_terms)
+{
+	attr->type = pmu->type;
+	return pmu_config(&pmu->format, attr, head_terms);
+}
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+			 int config, unsigned long *bits)
+{
+	struct perf_pmu__format *format;
+
+	format = zalloc(sizeof(*format));
+	if (!format)
+		return -ENOMEM;
+
+	format->name = strdup(name);
+	format->value = config;
+	memcpy(format->bits, bits, sizeof(format->bits));
+
+	list_add_tail(&format->list, list);
+	return 0;
+}
+
+void perf_pmu__set_format(unsigned long *bits, long from, long to)
+{
+	long b;
+
+	if (!to)
+		to = from;
+
+	memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS) * sizeof(long));
+	for (b = from; b <= to; b++)
+		set_bit(b, bits);
+}
+
+/* Simulated format definitions. */
+static struct test_format {
+	const char *name;
+	const char *value;
+} test_formats[] = {
+	{ "krava01", "config:0-1,62-63\n", },
+	{ "krava02", "config:10-17\n", },
+	{ "krava03", "config:5\n", },
+	{ "krava11", "config1:0,2,4,6,8,20-28\n", },
+	{ "krava12", "config1:63\n", },
+	{ "krava13", "config1:45-47\n", },
+	{ "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", },
+	{ "krava22", "config2:8,18,48,58\n", },
+	{ "krava23", "config2:28-29,38\n", },
+};
+
+#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format))
+
+/* Simulated user input. */
+static struct parse_events__term test_terms[] = {
+	{
+		.config  = (char *) "krava01",
+		.val.num = 15,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava02",
+		.val.num = 170,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava03",
+		.val.num = 1,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava11",
+		.val.num = 27,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava12",
+		.val.num = 1,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava13",
+		.val.num = 2,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava21",
+		.val.num = 119,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava22",
+		.val.num = 11,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+	{
+		.config  = (char *) "krava23",
+		.val.num = 2,
+		.type    = PARSE_EVENTS__TERM_TYPE_NUM,
+	},
+};
+#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
+
+/*
+ * Prepare format directory data, as exported by the kernel
+ * at /sys/bus/event_source/devices/<dev>/format.
+ */
+static char *test_format_dir_get(void)
+{
+	static char dir[PATH_MAX];
+	unsigned int i;
+
+	snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX");
+	if (!mkdtemp(dir))
+		return NULL;
+
+	for (i = 0; i < TEST_FORMATS_CNT; i++) {
+		static char name[PATH_MAX];
+		struct test_format *format = &test_formats[i];
+		FILE *file;
+
+		snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
+
+		file = fopen(name, "w");
+		if (!file)
+			return NULL;
+
+		if (1 != fwrite(format->value, strlen(format->value), 1, file))
+			break;
+
+		fclose(file);
+	}
+
+	return dir;
+}
+
+/* Cleanup format directory. */
+static int test_format_dir_put(char *dir)
+{
+	char buf[PATH_MAX];
+	snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir);
+	if (system(buf))
+		return -1;
+
+	snprintf(buf, PATH_MAX, "rmdir %s\n", dir);
+	return system(buf);
+}
+
+static struct list_head *test_terms_list(void)
+{
+	static LIST_HEAD(terms);
+	unsigned int i;
+
+	for (i = 0; i < TERMS_CNT; i++)
+		list_add_tail(&test_terms[i].list, &terms);
+
+	return &terms;
+}
+
+#undef TERMS_CNT
+
+int perf_pmu__test(void)
+{
+	char *format = test_format_dir_get();
+	LIST_HEAD(formats);
+	struct list_head *terms = test_terms_list();
+	int ret;
+
+	if (!format)
+		return -EINVAL;
+
+	do {
+		struct perf_event_attr attr;
+
+		memset(&attr, 0, sizeof(attr));
+
+		ret = pmu_format_parse(format, &formats);
+		if (ret)
+			break;
+
+		ret = pmu_config(&formats, &attr, terms);
+		if (ret)
+			break;
+
+		ret = -EINVAL;
+
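+		/*
+		 * The expected config values below are the result of
+		 * scattering the test_terms values through the simulated
+		 * test_formats bit definitions above.
+		 */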
+		if (attr.config  != 0xc00000000002a823)
+			break;
+		if (attr.config1 != 0x8000400000000145)
+			break;
+		if (attr.config2 != 0x0400000020041d07)
+			break;
+
+		ret = 0;
+	} while (0);
+
+	test_format_dir_put(format);
+	return ret;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
new file mode 100644
index 0000000..68c0db9
--- /dev/null
+++ b/tools/perf/util/pmu.h
@@ -0,0 +1,41 @@
+#ifndef __PMU_H
+#define __PMU_H
+
+#include <linux/bitops.h>
+#include "../../../include/linux/perf_event.h"
+
+enum {
+	PERF_PMU_FORMAT_VALUE_CONFIG,
+	PERF_PMU_FORMAT_VALUE_CONFIG1,
+	PERF_PMU_FORMAT_VALUE_CONFIG2,
+};
+
+#define PERF_PMU_FORMAT_BITS 64
+
+struct perf_pmu__format {
+	char *name;
+	int value;
+	DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+	struct list_head list;
+};
+
+struct perf_pmu {
+	char *name;
+	__u32 type;
+	struct list_head format;
+	struct list_head list;
+};
+
+struct perf_pmu *perf_pmu__find(char *name);
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+		     struct list_head *head_terms);
+
+int perf_pmu_wrap(void);
+void perf_pmu_error(struct list_head *list, char *name, char const *msg);
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+			 int config, unsigned long *bits);
+void perf_pmu__set_format(unsigned long *bits, long from, long to);
+
+int perf_pmu__test(void);
+#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l
new file mode 100644
index 0000000..a15d9fb
--- /dev/null
+++ b/tools/perf/util/pmu.l
@@ -0,0 +1,43 @@
+%option prefix="perf_pmu_"
+
+%{
+#include <stdlib.h>
+#include <linux/bitops.h>
+#include "pmu.h"
+#include "pmu-bison.h"
+
+static int value(int base)
+{
+	long num;
+
+	errno = 0;
+	num = strtoul(perf_pmu_text, NULL, base);
+	if (errno)
+		return PP_ERROR;
+
+	perf_pmu_lval.num = num;
+	return PP_VALUE;
+}
+
+%}
+
+num_dec         [0-9]+
+
+%%
+
+{num_dec}	{ return value(10); }
+config		{ return PP_CONFIG; }
+config1		{ return PP_CONFIG1; }
+config2		{ return PP_CONFIG2; }
+-		{ return '-'; }
+:		{ return ':'; }
+,		{ return ','; }
+.		{ ; }
+\n		{ ; }
+
+%%
+
+int perf_pmu_wrap(void)
+{
+	return 1;
+}
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
new file mode 100644
index 0000000..20ea77e
--- /dev/null
+++ b/tools/perf/util/pmu.y
@@ -0,0 +1,93 @@
+
+%name-prefix "perf_pmu_"
+%parse-param {struct list_head *format}
+%parse-param {char *name}
+
+%{
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <string.h>
+#include "pmu.h"
+
+extern int perf_pmu_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+	if (val) \
+		YYABORT; \
+} while (0)
+
+%}
+
+%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
+%token PP_VALUE PP_ERROR
+%type <num> PP_VALUE
+%type <bits> bit_term
+%type <bits> bits
+
+%union
+{
+	unsigned long num;
+	DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+}
+
+%%
+
+format:
+format format_term
+|
+format_term
+
+format_term:
+PP_CONFIG ':' bits
+{
+	ABORT_ON(perf_pmu__new_format(format, name,
+				      PERF_PMU_FORMAT_VALUE_CONFIG,
+				      $3));
+}
+|
+PP_CONFIG1 ':' bits
+{
+	ABORT_ON(perf_pmu__new_format(format, name,
+				      PERF_PMU_FORMAT_VALUE_CONFIG1,
+				      $3));
+}
+|
+PP_CONFIG2 ':' bits
+{
+	ABORT_ON(perf_pmu__new_format(format, name,
+				      PERF_PMU_FORMAT_VALUE_CONFIG2,
+				      $3));
+}
+
+bits:
+bits ',' bit_term
+{
+	bitmap_or($$, $1, $3, 64);
+}
+|
+bit_term
+{
+	memcpy($$, $1, sizeof($1));
+}
+
+bit_term:
+PP_VALUE '-' PP_VALUE
+{
+	perf_pmu__set_format($$, $1, $3);
+}
+|
+PP_VALUE
+{
+	perf_pmu__set_format($$, $1, 0);
+}
+
+%%
+
+void perf_pmu_error(struct list_head *list __used,
+		    char *name __used,
+		    char const *msg __used)
+{
+}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 2cc162d..d448984 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -972,10 +972,12 @@
 	struct dwarf_callback_param *param = data;
 	struct probe_finder *pf = param->data;
 	struct perf_probe_point *pp = &pf->pev->point;
+	Dwarf_Attribute attr;
 
 	/* Check tag and diename */
 	if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-	    !die_compare_name(sp_die, pp->function))
+	    !die_compare_name(sp_die, pp->function) ||
+	    dwarf_attr(sp_die, DW_AT_declaration, &attr))
 		return DWARF_CB_OK;
 
 	/* Check declared file */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 002ebbf..9412e3b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -140,6 +140,7 @@
 	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
 	INIT_LIST_HEAD(&self->ordered_samples.to_free);
 	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
+	hists__init(&self->hists);
 
 	if (mode == O_RDONLY) {
 		if (perf_session__open(self, force) < 0)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5dd83c3..c0a028c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,6 +1,5 @@
 #include <dirent.h>
 #include <errno.h>
-#include <libgen.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
@@ -51,6 +50,8 @@
 
 int dso__name_len(const struct dso *dso)
 {
+	if (!dso)
+		return strlen("[unknown]");
 	if (verbose)
 		return dso->long_name_len;
 
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index a4088ce..dfd1bd8 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -722,7 +722,7 @@
 static int event_read_id(void)
 {
 	char *token;
-	int id;
+	int id = -1;
 
 	if (read_expected_item(EVENT_ITEM, "ID") < 0)
 		return -1;
@@ -731,15 +731,13 @@
 		return -1;
 
 	if (read_expect_type(EVENT_ITEM, &token) < 0)
-		goto fail;
+		goto free;
 
 	id = strtoul(token, NULL, 0);
+
+ free:
 	free_token(token);
 	return id;
-
- fail:
-	free_token(token);
-	return -1;
 }
 
 static int field_is_string(struct format_field *field)
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
index 84d761b..6ee82f6 100644
--- a/tools/perf/util/ui/browser.h
+++ b/tools/perf/util/ui/browser.h
@@ -49,6 +49,8 @@
 			const char *format, ...);
 int ui_browser__help_window(struct ui_browser *browser, const char *text);
 bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
+int ui_browser__input_window(const char *title, const char *text, char *input,
+			     const char *exit_msg, int delay_sec);
 
 void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
 unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index fa530fc..d7a1c4a 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -879,6 +879,7 @@
 	char *options[16];
 	int nr_options = 0;
 	int key = -1;
+	char buf[64];
 
 	if (browser == NULL)
 		return -1;
@@ -933,6 +934,16 @@
 			goto zoom_dso;
 		case 't':
 			goto zoom_thread;
+		case 's':
+			if (ui_browser__input_window("Symbol to show",
+					"Please enter the name of symbol you want to see",
+					buf, "ENTER: OK, ESC: Cancel",
+					delay_secs * 2) == K_ENTER) {
+				self->symbol_filter_str = *buf ? buf : NULL;
+				hists__filter_by_symbol(self);
+				hist_browser__reset(browser);
+			}
+			continue;
 		case K_F1:
 		case 'h':
 		case '?':
@@ -950,7 +961,8 @@
 					"C             Collapse all callchains\n"
 					"E             Expand all callchains\n"
 					"d             Zoom into current DSO\n"
-					"t             Zoom into current Thread");
+					"t             Zoom into current Thread\n"
+					"s             Filter symbol by name");
 			continue;
 		case K_ENTER:
 		case K_RIGHT:
diff --git a/tools/perf/util/ui/keysyms.h b/tools/perf/util/ui/keysyms.h
index 3458b19..809eca5 100644
--- a/tools/perf/util/ui/keysyms.h
+++ b/tools/perf/util/ui/keysyms.h
@@ -16,6 +16,8 @@
 #define K_TAB	'\t'
 #define K_UNTAB	SL_KEY_UNTAB
 #define K_UP	SL_KEY_UP
+#define K_BKSPC 0x7f
+#define K_DEL	SL_KEY_DELETE
 
 /* Not really keys */
 #define K_TIMER	 -1
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 45daa7c..ad4374a 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -69,6 +69,88 @@
 	return popup_menu__run(&menu);
 }
 
+int ui_browser__input_window(const char *title, const char *text, char *input,
+			     const char *exit_msg, int delay_secs)
+{
+	int x, y, len, key;
+	int max_len = 60, nr_lines = 0;
+	static char buf[50];
+	const char *t;
+
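+	/*
+	 * Measure the message text: find the longest line and count the
+	 * lines so the dialog box can be sized to fit it.
+	 */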
+	t = text;
+	while (1) {
+		const char *sep = strchr(t, '\n');
+
+		if (sep == NULL)
+			sep = strchr(t, '\0');
+		len = sep - t;
+		if (max_len < len)
+			max_len = len;
+		++nr_lines;
+		if (*sep == '\0')
+			break;
+		t = sep + 1;
+	}
+
+	max_len += 2;
+	nr_lines += 8;
+	y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+	x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+	SLsmg_set_color(0);
+	SLsmg_draw_box(y, x++, nr_lines, max_len);
+	if (title) {
+		SLsmg_gotorc(y, x + 1);
+		SLsmg_write_string((char *)title);
+	}
+	SLsmg_gotorc(++y, x);
+	nr_lines -= 7;
+	max_len -= 2;
+	SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+				   nr_lines, max_len, 1);
+	y += nr_lines;
+	len = 5;
+	while (len--) {
+		SLsmg_gotorc(y + len - 1, x);
+		SLsmg_write_nstring((char *)" ", max_len);
+	}
+	SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
+
+	SLsmg_gotorc(y + 3, x);
+	SLsmg_write_nstring((char *)exit_msg, max_len);
+	SLsmg_refresh();
+
+	x += 2;
+	len = 0;
+	key = ui__getch(delay_secs);
+	while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+		if (key == K_BKSPC) {
+			if (len == 0)
+				goto next_key;
+			SLsmg_gotorc(y, x + --len);
+			SLsmg_write_char(' ');
+		} else {
+			buf[len] = key;
+			SLsmg_gotorc(y, x + len++);
+			SLsmg_write_char(key);
+		}
+		SLsmg_refresh();
+
+		/* XXX more graceful overflow handling needed */
+		if (len == sizeof(buf) - 1) {
+			ui_helpline__push("maximum size of symbol name reached!");
+			key = K_ENTER;
+			break;
+		}
+next_key:
+		key = ui__getch(delay_secs);
+	}
+
+	buf[len] = '\0';
+	strncpy(input, buf, len+1);
+	return key;
+}
+
 int ui__question_window(const char *title, const char *text,
 			const char *exit_msg, int delay_secs)
 {
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 555c69a..adf175f 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -4,11 +4,13 @@
 .SH SYNOPSIS
 .ft B
 .B turbostat
+.RB [ "\-s" ]
 .RB [ "\-v" ]
 .RB [ "\-M MSR#" ]
 .RB command
 .br
 .B turbostat
+.RB [ "\-s" ]
 .RB [ "\-v" ]
 .RB [ "\-M MSR#" ]
 .RB [ "\-i interval_sec" ]
@@ -25,6 +27,8 @@
 on processors that additionally support C-state residency counters.
 
 .SS Options
+The \fB-s\fP option prints only a 1-line summary for each sample interval.
+.PP
 The \fB-v\fP option increases verbosity.
 .PP
 The \fB-M MSR#\fP option dumps the specified MSR,
@@ -39,13 +43,14 @@
 .SH FIELD DESCRIPTIONS
 .nf
 \fBpk\fP processor package number.
-\fBcr\fP processor core number.
+\fBcor\fP processor core number.
 \fBCPU\fP Linux CPU (logical processor) number.
+Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
 \fB%c0\fP percent of the interval that the CPU retired instructions.
 \fBGHz\fP average clock rate while the CPU was in c0 state.
 \fBTSC\fP average GHz that the TSC ran during the entire interval.
-\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
-\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
+\fB%c1, %c3, %c6, %c7\fP show the percentage residency in hardware core idle states.
+\fB%pc2, %pc3, %pc6, %pc7\fP percentage residency in hardware package idle states.
 .fi
 .PP
 .SH EXAMPLE
@@ -53,25 +58,37 @@
 (override interval with "-i sec" option, or specify a command
 for turbostat to fork).
 
-The first row of statistics reflect the average for the entire system.
+The first row of statistics is a summary for the entire system.
+Note that the summary is a weighted average.
 Subsequent rows show per-CPU statistics.
 
 .nf
 [root@x980]# ./turbostat
-cr   CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
-          0.04 1.62 3.38   0.11   0.00  99.85   0.00  95.07
-  0   0   0.04 1.62 3.38   0.06   0.00  99.90   0.00  95.07
-  0   6   0.02 1.62 3.38   0.08   0.00  99.90   0.00  95.07
-  1   2   0.10 1.62 3.38   0.29   0.00  99.61   0.00  95.07
-  1   8   0.11 1.62 3.38   0.28   0.00  99.61   0.00  95.07
-  2   4   0.01 1.62 3.38   0.01   0.00  99.98   0.00  95.07
-  2  10   0.01 1.61 3.38   0.02   0.00  99.98   0.00  95.07
-  8   1   0.07 1.62 3.38   0.15   0.00  99.78   0.00  95.07
-  8   7   0.03 1.62 3.38   0.19   0.00  99.78   0.00  95.07
-  9   3   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
-  9   9   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
- 10   5   0.01 1.62 3.38   0.13   0.00  99.86   0.00  95.07
- 10  11   0.08 1.62 3.38   0.05   0.00  99.86   0.00  95.07
+cor CPU    %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
+          0.60 1.63 3.38   2.91   0.00  96.49   0.00  76.64
+  0   0   0.59 1.62 3.38   4.51   0.00  94.90   0.00  76.64
+  0   6   1.13 1.64 3.38   3.97   0.00  94.90   0.00  76.64
+  1   2   0.08 1.62 3.38   0.07   0.00  99.85   0.00  76.64
+  1   8   0.03 1.62 3.38   0.12   0.00  99.85   0.00  76.64
+  2   4   0.01 1.62 3.38   0.06   0.00  99.93   0.00  76.64
+  2  10   0.04 1.62 3.38   0.02   0.00  99.93   0.00  76.64
+  8   1   2.85 1.62 3.38  11.71   0.00  85.44   0.00  76.64
+  8   7   1.98 1.62 3.38  12.58   0.00  85.44   0.00  76.64
+  9   3   0.36 1.62 3.38   0.71   0.00  98.93   0.00  76.64
+  9   9   0.09 1.62 3.38   0.98   0.00  98.93   0.00  76.64
+ 10   5   0.03 1.62 3.38   0.09   0.00  99.87   0.00  76.64
+ 10  11   0.07 1.62 3.38   0.06   0.00  99.87   0.00  76.64
+.fi
+.SH SUMMARY EXAMPLE
+The "-s" option prints the column headers just once,
+and then a one-line system summary for each sample interval.
+
+.nf
+[root@x980]# ./turbostat -s
+   %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
+  0.61 1.89 3.38   5.95   0.00  93.44   0.00  66.33
+  0.52 1.62 3.38   6.83   0.00  92.65   0.00  61.11
+  0.62 1.92 3.38   5.47   0.00  93.91   0.00  67.31
 .fi
 .SH VERBOSE EXAMPLE
 The "-v" option adds verbosity to the output:
@@ -101,33 +118,33 @@
 
 .nf
 [root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
-
-^Ccr   CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
-           8.49 3.63 3.38  16.23   0.66  74.63   0.00   0.00
-   0   0   1.22 3.62 3.38  32.18   0.00  66.60   0.00   0.00
-   0   6   0.40 3.61 3.38  33.00   0.00  66.60   0.00   0.00
-   1   2   0.11 3.14 3.38   0.19   3.95  95.75   0.00   0.00
-   1   8   0.05 2.88 3.38   0.25   3.95  95.75   0.00   0.00
-   2   4   0.00 3.13 3.38   0.02   0.00  99.98   0.00   0.00
-   2  10   0.00 3.09 3.38   0.02   0.00  99.98   0.00   0.00
-   8   1   0.04 3.50 3.38  14.43   0.00  85.54   0.00   0.00
-   8   7   0.03 2.98 3.38  14.43   0.00  85.54   0.00   0.00
-   9   3   0.00 3.16 3.38 100.00   0.00   0.00   0.00   0.00
-   9   9  99.93 3.63 3.38   0.06   0.00   0.00   0.00   0.00
-  10   5   0.01 2.82 3.38   0.08   0.00  99.91   0.00   0.00
-  10  11   0.02 3.36 3.38   0.06   0.00  99.91   0.00   0.00
-6.950866 sec
+^C
+cor CPU    %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
+          8.63 3.64 3.38  14.46   0.49  76.42   0.00   0.00
+  0   0   0.34 3.36 3.38  99.66   0.00   0.00   0.00   0.00
+  0   6  99.96 3.64 3.38   0.04   0.00   0.00   0.00   0.00
+  1   2   0.14 3.50 3.38   1.75   2.04  96.07   0.00   0.00
+  1   8   0.38 3.57 3.38   1.51   2.04  96.07   0.00   0.00
+  2   4   0.01 2.65 3.38   0.06   0.00  99.93   0.00   0.00
+  2  10   0.03 2.12 3.38   0.04   0.00  99.93   0.00   0.00
+  8   1   0.91 3.59 3.38  35.27   0.92  62.90   0.00   0.00
+  8   7   1.61 3.63 3.38  34.57   0.92  62.90   0.00   0.00
+  9   3   0.04 3.38 3.38   0.20   0.00  99.76   0.00   0.00
+  9   9   0.04 3.29 3.38   0.20   0.00  99.76   0.00   0.00
+ 10   5   0.03 3.08 3.38   0.12   0.00  99.85   0.00   0.00
+ 10  11   0.05 3.07 3.38   0.10   0.00  99.85   0.00   0.00
+4.907015 sec
 
 .fi
-Above the cycle soaker drives cpu9 up 3.6 Ghz turbo limit
+Above, the cycle soaker drives cpu6 up to the 3.6 GHz turbo limit
 while the other processors are generally in various states of idle.
 
-Note that cpu3 is an HT sibling sharing core9
-with cpu9, and thus it is unable to get to an idle state
-deeper than c1 while cpu9 is busy.
+Note that cpu0 is an HT sibling sharing core0
+with cpu6, and thus it is unable to get to an idle state
+deeper than c1 while cpu6 is busy.
 
-Note that turbostat reports average GHz of 3.61, while
-the arithmetic average of the GHz column above is 3.24.
+Note that turbostat reports average GHz of 3.64, while
+the arithmetic average of the GHz column above is lower.
 This is a weighted average, where the weight is %c0.  ie. it is the total number of
 un-halted cycles elapsed per time divided by the number of CPUs.
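.PP
As a rough cross-check (not output produced by turbostat itself), the
weighting can be reproduced from the per-CPU %c0 and GHz values shown in the
example above with a small standalone C program; the literal values below are
simply copied from that output:
.nf
#include <stdio.h>

int main(void)
{
	/* %c0 and GHz per CPU, taken from the example output above */
	double c0[]  = { 0.34, 99.96, 0.14, 0.38, 0.01, 0.03,
			 0.91,  1.61, 0.04, 0.04, 0.03, 0.05 };
	double ghz[] = { 3.36,  3.64, 3.50, 3.57, 2.65, 2.12,
			 3.59,  3.63, 3.38, 3.29, 3.08, 3.07 };
	int i, n = sizeof(c0) / sizeof(c0[0]);
	double weighted = 0, weight = 0, plain = 0;

	for (i = 0; i < n; i++) {
		weighted += c0[i] * ghz[i];	/* busy CPUs count more */
		weight   += c0[i];
		plain    += ghz[i];
	}
	/* prints approximately: weighted 3.64  arithmetic 3.24 */
	printf("weighted %.2f  arithmetic %.2f\n",
		weighted / weight, plain / n);
	return 0;
}
.fi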
 .SH NOTES
@@ -167,6 +184,6 @@
 .SH "SEE ALSO"
 msr(4), vmstat(8)
 .PP
-.SH AUTHORS
+.SH AUTHOR
 .nf
 Written by Len Brown <len.brown@intel.com>
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 310d3dd..ab2f682 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2,7 +2,7 @@
  * turbostat -- show CPU frequency and C-state residency
  * on modern Intel turbo-capable processors.
  *
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2012 Intel Corporation.
  * Len Brown <len.brown@intel.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -19,6 +19,7 @@
  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
+#define _GNU_SOURCE
 #include <stdio.h>
 #include <unistd.h>
 #include <sys/types.h>
@@ -32,6 +33,7 @@
 #include <dirent.h>
 #include <string.h>
 #include <ctype.h>
+#include <sched.h>
 
 #define MSR_TSC	0x10
 #define MSR_NEHALEM_PLATFORM_INFO	0xCE
@@ -49,6 +51,7 @@
 char *proc_stat = "/proc/stat";
 unsigned int interval_sec = 5;	/* set with -i interval_sec */
 unsigned int verbose;		/* set with -v */
+unsigned int summary_only;	/* set with -s */
 unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
@@ -68,9 +71,10 @@
 int aperf_mperf_unstable;
 int backwards_count;
 char *progname;
-int need_reinitialize;
 
 int num_cpus;
+cpu_set_t *cpu_mask;
+size_t cpu_mask_size;
 
 struct counters {
 	unsigned long long tsc;		/* per thread */
@@ -99,44 +103,76 @@
 struct timeval tv_odd;
 struct timeval tv_delta;
 
-unsigned long long get_msr(int cpu, off_t offset)
+/*
+ * cpu_mask_init(ncpus)
+ *
+ * allocate and clear cpu_mask
+ * set cpu_mask_size
+ */
+void cpu_mask_init(int ncpus)
+{
+	cpu_mask = CPU_ALLOC(ncpus);
+	if (cpu_mask == NULL) {
+		perror("CPU_ALLOC");
+		exit(3);
+	}
+	cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
+	CPU_ZERO_S(cpu_mask_size, cpu_mask);
+}
+
+void cpu_mask_uninit()
+{
+	CPU_FREE(cpu_mask);
+	cpu_mask = NULL;
+	cpu_mask_size = 0;
+}
+
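+/*
+ * cpu_migrate(cpu)
+ *
+ * bind the calling thread to the given cpu.
+ * return 0 on success, -1 on failure; callers use a failure
+ * (e.g. the cpu went off-line) as a cue to re-initialize.
+ */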
+int cpu_migrate(int cpu)
+{
+	CPU_ZERO_S(cpu_mask_size, cpu_mask);
+	CPU_SET_S(cpu, cpu_mask_size, cpu_mask);
+	if (sched_setaffinity(0, cpu_mask_size, cpu_mask) == -1)
+		return -1;
+	else
+		return 0;
+}
+
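+/*
+ * get_msr(cpu, offset, msr)
+ *
+ * read one MSR from /dev/cpu/<cpu>/msr into *msr.
+ * return 0 on success, -1 if the msr device can not be opened
+ * or the read comes back short.
+ */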
+int get_msr(int cpu, off_t offset, unsigned long long *msr)
 {
 	ssize_t retval;
-	unsigned long long msr;
 	char pathname[32];
 	int fd;
 
 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
 	fd = open(pathname, O_RDONLY);
-	if (fd < 0) {
-		perror(pathname);
-		need_reinitialize = 1;
-		return 0;
-	}
+	if (fd < 0)
+		return -1;
 
-	retval = pread(fd, &msr, sizeof msr, offset);
-	if (retval != sizeof msr) {
-		fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
-			cpu, offset, retval);
-		exit(-2);
-	}
-
+	retval = pread(fd, msr, sizeof *msr, offset);
 	close(fd);
-	return msr;
+
+	if (retval != sizeof *msr)
+		return -1;
+
+	return 0;
 }
 
 void print_header(void)
 {
 	if (show_pkg)
 		fprintf(stderr, "pk");
+	if (show_pkg)
+		fprintf(stderr, " ");
 	if (show_core)
-		fprintf(stderr, " cr");
+		fprintf(stderr, "cor");
 	if (show_cpu)
 		fprintf(stderr, " CPU");
+	if (show_pkg || show_core || show_cpu)
+		fprintf(stderr, " ");
 	if (do_nhm_cstates)
-		fprintf(stderr, "    %%c0 ");
+		fprintf(stderr, "   %%c0");
 	if (has_aperf)
-		fprintf(stderr, " GHz");
+		fprintf(stderr, "  GHz");
 	fprintf(stderr, "  TSC");
 	if (do_nhm_cstates)
 		fprintf(stderr, "    %%c1");
@@ -147,13 +183,13 @@
 	if (do_snb_cstates)
 		fprintf(stderr, "    %%c7");
 	if (do_snb_cstates)
-		fprintf(stderr, "  %%pc2");
+		fprintf(stderr, "   %%pc2");
 	if (do_nhm_cstates)
-		fprintf(stderr, "  %%pc3");
+		fprintf(stderr, "   %%pc3");
 	if (do_nhm_cstates)
-		fprintf(stderr, "  %%pc6");
+		fprintf(stderr, "   %%pc6");
 	if (do_snb_cstates)
-		fprintf(stderr, "  %%pc7");
+		fprintf(stderr, "   %%pc7");
 	if (extra_msr_offset)
 		fprintf(stderr, "        MSR 0x%x ", extra_msr_offset);
 
@@ -187,6 +223,15 @@
 		dump_cnt(cnt);
 }
 
+/*
+ * column formatting convention & formats
+ * package: "pk" 2 columns %2d
+ * core: "cor" 3 columns %3d
+ * CPU: "CPU" 3 columns %3d
+ * GHz: "GHz" 3 columns %3.2
+ * TSC: "TSC" 3 columns %3.2
+ * percentage " %pc3" %6.2
+ */
 void print_cnt(struct counters *p)
 {
 	double interval_float;
@@ -196,39 +241,45 @@
 	/* topology columns, print blanks on 1st (average) line */
 	if (p == cnt_average) {
 		if (show_pkg)
+			fprintf(stderr, "  ");
+		if (show_pkg && show_core)
 			fprintf(stderr, " ");
 		if (show_core)
-			fprintf(stderr, "    ");
+			fprintf(stderr, "   ");
 		if (show_cpu)
-			fprintf(stderr, "    ");
+			fprintf(stderr, " " "   ");
 	} else {
 		if (show_pkg)
-			fprintf(stderr, "%d", p->pkg);
+			fprintf(stderr, "%2d", p->pkg);
+		if (show_pkg && show_core)
+			fprintf(stderr, " ");
 		if (show_core)
-			fprintf(stderr, "%4d", p->core);
+			fprintf(stderr, "%3d", p->core);
 		if (show_cpu)
-			fprintf(stderr, "%4d", p->cpu);
+			fprintf(stderr, " %3d", p->cpu);
 	}
 
 	/* %c0 */
 	if (do_nhm_cstates) {
+		if (show_pkg || show_core || show_cpu)
+			fprintf(stderr, " ");
 		if (!skip_c0)
-			fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc);
+			fprintf(stderr, "%6.2f", 100.0 * p->mperf/p->tsc);
 		else
-			fprintf(stderr, "   ****");
+			fprintf(stderr, "  ****");
 	}
 
 	/* GHz */
 	if (has_aperf) {
 		if (!aperf_mperf_unstable) {
-			fprintf(stderr, "%5.2f",
+			fprintf(stderr, " %3.2f",
 				1.0 * p->tsc / units * p->aperf /
 				p->mperf / interval_float);
 		} else {
 			if (p->aperf > p->tsc || p->mperf > p->tsc) {
-				fprintf(stderr, " ****");
+				fprintf(stderr, " ***");
 			} else {
-				fprintf(stderr, "%4.1f*",
+				fprintf(stderr, "%3.1f*",
 					1.0 * p->tsc /
 					units * p->aperf /
 					p->mperf / interval_float);
@@ -241,7 +292,7 @@
 
 	if (do_nhm_cstates) {
 		if (!skip_c1)
-			fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
+			fprintf(stderr, " %6.2f", 100.0 * p->c1/p->tsc);
 		else
 			fprintf(stderr, "  ****");
 	}
@@ -252,13 +303,13 @@
 	if (do_snb_cstates)
 		fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
 	if (do_snb_cstates)
-		fprintf(stderr, " %5.2f", 100.0 * p->pc2/p->tsc);
+		fprintf(stderr, " %6.2f", 100.0 * p->pc2/p->tsc);
 	if (do_nhm_cstates)
-		fprintf(stderr, " %5.2f", 100.0 * p->pc3/p->tsc);
+		fprintf(stderr, " %6.2f", 100.0 * p->pc3/p->tsc);
 	if (do_nhm_cstates)
-		fprintf(stderr, " %5.2f", 100.0 * p->pc6/p->tsc);
+		fprintf(stderr, " %6.2f", 100.0 * p->pc6/p->tsc);
 	if (do_snb_cstates)
-		fprintf(stderr, " %5.2f", 100.0 * p->pc7/p->tsc);
+		fprintf(stderr, " %6.2f", 100.0 * p->pc7/p->tsc);
 	if (extra_msr_offset)
 		fprintf(stderr, "  0x%016llx", p->extra_msr);
 	putc('\n', stderr);
@@ -267,12 +318,20 @@
 void print_counters(struct counters *counters)
 {
 	struct counters *cnt;
+	static int printed;
 
-	print_header();
+
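+	/* with -s, print the column header only before the first sample */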
+	if (!printed || !summary_only)
+		print_header();
 
 	if (num_cpus > 1)
 		print_cnt(cnt_average);
 
+	printed = 1;
+
+	if (summary_only)
+		return;
+
 	for (cnt = counters; cnt != NULL; cnt = cnt->next)
 		print_cnt(cnt);
 
@@ -440,31 +499,51 @@
 	free(sum);
 }
 
-void get_counters(struct counters *cnt)
+int get_counters(struct counters *cnt)
 {
 	for ( ; cnt; cnt = cnt->next) {
-		cnt->tsc = get_msr(cnt->cpu, MSR_TSC);
-		if (do_nhm_cstates)
-			cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY);
-		if (do_nhm_cstates)
-			cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY);
+
+		if (cpu_migrate(cnt->cpu))
+			return -1;
+
+		if (get_msr(cnt->cpu, MSR_TSC, &cnt->tsc))
+			return -1;
+
+		if (has_aperf) {
+			if (get_msr(cnt->cpu, MSR_APERF, &cnt->aperf))
+				return -1;
+			if (get_msr(cnt->cpu, MSR_MPERF, &cnt->mperf))
+				return -1;
+		}
+
+		if (do_nhm_cstates) {
+			if (get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY, &cnt->c3))
+				return -1;
+			if (get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY, &cnt->c6))
+				return -1;
+		}
+
 		if (do_snb_cstates)
-			cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY);
-		if (has_aperf)
-			cnt->aperf = get_msr(cnt->cpu, MSR_APERF);
-		if (has_aperf)
-			cnt->mperf = get_msr(cnt->cpu, MSR_MPERF);
-		if (do_snb_cstates)
-			cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY);
-		if (do_nhm_cstates)
-			cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY);
-		if (do_nhm_cstates)
-			cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY);
-		if (do_snb_cstates)
-			cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY);
+			if (get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY, &cnt->c7))
+				return -1;
+
+		if (do_nhm_cstates) {
+			if (get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY, &cnt->pc3))
+				return -1;
+			if (get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY, &cnt->pc6))
+				return -1;
+		}
+		if (do_snb_cstates) {
+			if (get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY, &cnt->pc2))
+				return -1;
+			if (get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY, &cnt->pc7))
+				return -1;
+		}
 		if (extra_msr_offset)
-			cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset);
+			if (get_msr(cnt->cpu, extra_msr_offset, &cnt->extra_msr))
+				return -1;
 	}
+	return 0;
 }
 
 void print_nehalem_info(void)
@@ -475,7 +554,7 @@
 	if (!do_nehalem_platform_info)
 		return;
 
-	msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO);
+	get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr);
 
 	ratio = (msr >> 40) & 0xFF;
 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
@@ -491,7 +570,7 @@
 	if (!do_nehalem_turbo_ratio_limit)
 		return;
 
-	msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT);
+	get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr);
 
 	ratio = (msr >> 24) & 0xFF;
 	if (ratio)
@@ -557,7 +636,8 @@
 		return;
 	}
 
-	show_cpu = 1;	/* there is more than one CPU */
+	if (!summary_only)
+		show_cpu = 1;	/* there is more than one CPU */
 
 	/*
 	 * insert on front of list.
@@ -575,13 +655,15 @@
 
 	while (prev->next && (prev->next->pkg < new->pkg)) {
 		prev = prev->next;
-		show_pkg = 1;	/* there is more than 1 package */
+		if (!summary_only)
+			show_pkg = 1;	/* there is more than 1 package */
 	}
 
 	while (prev->next && (prev->next->pkg == new->pkg)
 		&& (prev->next->core < new->core)) {
 		prev = prev->next;
-		show_core = 1;	/* there is more than 1 core */
+		if (!summary_only)
+			show_core = 1;	/* there is more than 1 core */
 	}
 
 	while (prev->next && (prev->next->pkg == new->pkg)
@@ -681,7 +763,7 @@
 }
 
 /*
- * run func(index, cpu) on every cpu in /proc/stat
+ * run func(pkg, core, cpu) on every cpu in /proc/stat
  */
 
 int for_all_cpus(void (func)(int, int, int))
@@ -717,18 +799,18 @@
 
 void re_initialize(void)
 {
-	printf("turbostat: topology changed, re-initializing.\n");
 	free_all_counters();
 	num_cpus = for_all_cpus(alloc_new_counters);
-	need_reinitialize = 0;
-	printf("num_cpus is now %d\n", num_cpus);
+	cpu_mask_uninit();
+	cpu_mask_init(num_cpus);
+	printf("turbostat: re-initialized with num_cpus %d\n", num_cpus);
 }
 
 void dummy(int pkg, int core, int cpu) { return; }
 /*
  * check to see if a cpu came on-line
  */
-void verify_num_cpus(void)
+int verify_num_cpus(void)
 {
 	int new_num_cpus;
 
@@ -738,8 +820,9 @@
 		if (verbose)
 			printf("num_cpus was %d, is now  %d\n",
 				num_cpus, new_num_cpus);
-		need_reinitialize = 1;
+		return -1;
 	}
+	return 0;
 }
 
 void turbostat_loop()
@@ -749,25 +832,25 @@
 	gettimeofday(&tv_even, (struct timezone *)NULL);
 
 	while (1) {
-		verify_num_cpus();
-		if (need_reinitialize) {
+		if (verify_num_cpus()) {
 			re_initialize();
 			goto restart;
 		}
 		sleep(interval_sec);
-		get_counters(cnt_odd);
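+		/* counter read failed (e.g. cpu went off-line): re-discover cpus and restart */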
+		if (get_counters(cnt_odd)) {
+			re_initialize();
+			goto restart;
+		}
 		gettimeofday(&tv_odd, (struct timezone *)NULL);
-
 		compute_delta(cnt_odd, cnt_even, cnt_delta);
 		timersub(&tv_odd, &tv_even, &tv_delta);
 		compute_average(cnt_delta, cnt_average);
 		print_counters(cnt_delta);
-		if (need_reinitialize) {
+		sleep(interval_sec);
+		if (get_counters(cnt_even)) {
 			re_initialize();
 			goto restart;
 		}
-		sleep(interval_sec);
-		get_counters(cnt_even);
 		gettimeofday(&tv_even, (struct timezone *)NULL);
 		compute_delta(cnt_even, cnt_odd, cnt_delta);
 		timersub(&tv_even, &tv_odd, &tv_delta);
@@ -953,6 +1036,7 @@
 	check_super_user();
 
 	num_cpus = for_all_cpus(alloc_new_counters);
+	cpu_mask_init(num_cpus);
 
 	if (verbose)
 		print_nehalem_info();
@@ -1005,8 +1089,11 @@
 
 	progname = argv[0];
 
-	while ((opt = getopt(argc, argv, "+vi:M:")) != -1) {
+	while ((opt = getopt(argc, argv, "+svi:M:")) != -1) {
 		switch (opt) {
+		case 's':
+			summary_only++;
+			break;
 		case 'v':
 			verbose++;
 			break;